#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H

#include <linux/config.h>
#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>
/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#include <asm/virtconvert.h>
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) \
	do { \
		*(pteptr) = (pteval); \
	} while (0)
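
/*
 * Illustrative sketch (not part of the original header): installing a new
 * mapping through the hook above. "pmdp", "page" and "vaddr" are
 * hypothetical names.
 *
 *	pte_t *ptep = pte_offset(pmdp, vaddr);
 *	set_pte(ptep, mk_pte(page, PAGE_KERNEL));
 */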
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	25
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the m68k is configured as three-level,
 * so we do have PMD level physically.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	8
#define PTRS_PER_PGD	128
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0
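
/*
 * Worked example (illustrative, derived from the shifts above): a 32-bit
 * virtual address splits into
 *
 *	bits 31-25: pgd index  (7 bits  -> PTRS_PER_PGD = 128)
 *	bits 24-22: pmd index  (3 bits  -> PTRS_PER_PMD = 8)
 *	bits 21-12: pte index  (10 bits -> PTRS_PER_PTE = 1024)
 *	bits 11-0:  byte offset within the 4kB page
 */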
/* Virtual address region for use by kernel_map() */
#define KMAP_START	0xd0000000
#define KMAP_END	0xf0000000
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END	KMAP_START
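
/*
 * Worked example (illustrative): assuming 16MB of RAM with high_memory at
 * 0x01000000, VMALLOC_START = (0x01000000 + 0x00800000) & ~0x007fffff
 * = 0x01800000, i.e. vmalloc space begins at the next 8MB boundary past
 * the end of physical memory.
 */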
#endif /* __ASSEMBLY__ */
/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_SHORT	0x002
#define _PAGE_RONLY	0x004
#define _PAGE_ACCESSED	0x008
#define _PAGE_DIRTY	0x010
#define _PAGE_SUPER	0x080	/* 68040 supervisor only */
#define _PAGE_FAKE_SUPER	0x200	/* fake supervisor only on 680[23]0 */
#define _PAGE_GLOBAL040	0x400	/* 68040 global bit, used for kva descs */
#define _PAGE_COW	0x800	/* implemented in software */
#define _PAGE_NOCACHE030	0x040	/* 68030 no-cache mode */
#define _PAGE_NOCACHE	0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S	0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040	0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W	0x000	/* 68040 cache mode, cachable, write-through */
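
/*
 * Illustrative note: on the '040 the cache mode occupies bits 5-6 of a
 * descriptor (mask 0x060, cf. _CACHEMASK040 below), so a copyback kernel
 * page descriptor would be
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_CACHE040
 * = 0x001 | 0x008 | 0x010 | 0x020 = 0x039.
 */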
/* Page protection values within PTE. */
#define SUN3_PAGE_VALID		(0x80000000)
#define SUN3_PAGE_WRITEABLE	(0x40000000)
#define SUN3_PAGE_SYSTEM	(0x20000000)
#define SUN3_PAGE_NOCACHE	(0x10000000)
#define SUN3_PAGE_ACCESSED	(0x02000000)
#define SUN3_PAGE_MODIFIED	(0x01000000)
#define _DESCTYPE_MASK	0x003

#define _CACHEMASK040	(~0x060)
#define _TABLE_MASK	(0xfffffe00)

#define _PAGE_TABLE	(_PAGE_SHORT)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
#ifndef __ASSEMBLY__
/* This is the cache mode to be used for pages containing page descriptors
 * for processors >= '040. It is used in pte_mknocache(), and the variable
 * is defined and initialized in head.S. */
extern int m68k_pgtable_cachemode;

/* This is the cache mode for normal pages, for supervisor access on
 * processors >= '040. It is used in pte_mkcache(), and the variable is
 * defined and initialized in head.S. */
#if defined(CONFIG_060_WRITETHROUGH)
extern int m68k_supervisor_cachemode;
#else
#define m68k_supervisor_cachemode	_PAGE_CACHE040
#endif
#if defined(CPU_M68040_OR_M68060_ONLY)
#define mm_cachebits	_PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define mm_cachebits	0
#else
extern unsigned long mm_cachebits;
#endif
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)
/* Alternate definitions that are compile time constants, for
   initializing protection_map. The cachebits are fixed later. */
#define PAGE_NONE_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
/*
 * The m68k can't do page protection for execute, and considers it the
 * same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE_C
#define __P001	PAGE_READONLY_C
#define __P010	PAGE_COPY_C
#define __P011	PAGE_COPY_C
#define __P100	PAGE_READONLY_C
#define __P101	PAGE_READONLY_C
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE_C
#define __S001	PAGE_READONLY_C
#define __S010	PAGE_SHARED_C
#define __S011	PAGE_SHARED_C
#define __S100	PAGE_READONLY_C
#define __S101	PAGE_READONLY_C
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C
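
/*
 * Illustrative reading of the tables above (__Pxyz/__Sxyz = private/shared
 * mapping with read/write/execute bits xyz): __P101 (r-x) collapses to
 * PAGE_READONLY_C because execute is treated as read, while any writable
 * private combination (__P010, __P011, ...) becomes PAGE_COPY_C so that
 * stores can be caught for copy-on-write.
 */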
/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2	2
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(page, pgprot) \
({ \
	pte_t __pte; \
	pte_val(__pte) = __pa(page) + pgprot_val(pgprot); \
	__pte; \
})
#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
#define mk_pte_phys(physpage, pgprot) \
({ \
	pte_t __pte; \
	pte_val(__pte) = (physpage) + pgprot_val(pgprot); \
	__pte; \
})
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
extern inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
	unsigned long *ptr = pmdp->pmd;
	short i = 16;

	while (--i >= 0) {
		*ptr++ = ptbl;
		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
	}
}
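
/*
 * Illustrative note on the loop above: a pmd entry is really a run of 16
 * table descriptors, each pointing 1/16th of the way into the 1024-entry
 * pte table, so the stride is sizeof(pte_t)*PTRS_PER_PTE/16 = 4*1024/16
 * = 256 bytes per descriptor.
 */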
extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp); }

#define __pte_page(pte)	((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd)	((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define __pgd_page(pgd)	((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER))
#define pte_clear(ptep)		({ pte_val(*(ptep)) = 0; })

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
#define pmd_clear(pmdp) ({ \
	unsigned long *__ptr = pmdp->pmd; \
	short __i = 16; \
	while (--__i >= 0) \
		*__ptr++ = 0; \
})

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
#define pgd_clear(pgdp)		({ pgd_val(*pgdp) = 0; })
/* Permanent address of a page. */
#define page_address(page)	((page)->virtual)
#define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define pte_page(pte)		(mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%08lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return 1; }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_RONLY); }
extern inline int pte_exec(pte_t pte)		{ return 1; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
extern inline pte_t pte_mkcache(pte_t pte)	{ pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode; return pte; }
#define PAGE_DIR_OFFSET(tsk,address)	pgd_offset((tsk),(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];

extern inline pgd_t *pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}
/* Find an entry in the second-level page table.. */
extern inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t *pte_offset(pmd_t *pmdp, unsigned long address)
{
	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1));
}
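
/*
 * Usage sketch (illustrative; "mm" and "vaddr" are hypothetical): a full
 * three-level walk from a mm_struct down to the pte for a user address.
 *
 *	pgd_t *pgd = pgd_offset(mm, vaddr);
 *	pmd_t *pmd = pmd_offset(pgd, vaddr);
 *	pte_t *pte = pte_offset(pmd, vaddr);
 */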
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(unsigned long vaddr)
{
	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir, vaddr);
		ptep = pte_offset(pmdp, vaddr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(unsigned long vaddr)
{
	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(vaddr);
		pmdp = pmd_offset(dir, vaddr);
		ptep = pte_offset(pmdp, vaddr);
		*ptep = pte_mkcache(*ptep);
	}
}
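
/*
 * Illustrative call sequence (the flush steps below stand in for whatever
 * helpers the port provides; they are assumptions, not part of this
 * header): per the comment above, the page must leave the cache and ATC
 * before its mode is switched.
 *
 *	...flush the page from the data cache...
 *	...flush the stale ATC (TLB) entry...
 *	nocache_page(vaddr);
 */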
/*
 * Check if the addr/len goes up to the end of a physical
 * memory chunk. Used for DMA functions.
 */
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
/*
 * It makes no sense to consider whether we cross a memory boundary if
 * we support just one physical chunk of memory.
 */
extern inline int mm_end_of_chunk(unsigned long addr, int len)
{
	return 0;
}
#else
int mm_end_of_chunk(unsigned long addr, int len);
#endif
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t pte)
{
}
/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define SWP_TYPE(x)			(((x).val >> 1) & 0xff)
#define SWP_OFFSET(x)			((x).val >> 10)
#define SWP_ENTRY(type, offset)		((swp_entry_t) { ((type) << 1) | ((offset) << 10) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
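
/*
 * Worked example (illustrative): SWP_ENTRY(3, 0x1234) packs to
 * (3 << 1) | (0x1234 << 10) = 0x48d006; decoding gives
 * SWP_TYPE = (0x48d006 >> 1) & 0xff = 3 and SWP_OFFSET = 0x48d006 >> 10
 * = 0x1234. The type field uses bits 1-8 and the offset bits 10 and up,
 * so bit 0 (_PAGE_PRESENT) and bit 9 (_PAGE_FAKE_SUPER) stay clear and a
 * swap entry never looks pte_present().
 */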
#endif /* __ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

#include <asm-generic/pgtable.h>

#endif /* _M68K_PGTABLE_H */