/* MN10300 Page table manipulators and constants
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree for the purposes of the MN10300 TLB handler
 * functions.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <asm/cpu-regs.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/threads.h>

#include <asm/bitops.h>

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

extern void pmd_ctor(void *, struct kmem_cache *, unsigned long);
extern void pgtable_cache_init(void);
extern void paging_init(void);

#endif /* !__ASSEMBLY__ */
/*
 * The Linux mn10300 paging architecture only implements the traditional
 * two-level page table layout.
 */
#define PGDIR_SHIFT		22
#define PTRS_PER_PGD		1024
#define PTRS_PER_PUD		1	/* we don't really have any PUD physically */
#define PTRS_PER_PMD		1	/* we don't really have any PMD physically */
#define PTRS_PER_PTE		1024

#define PGD_SIZE		PAGE_SIZE
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS	(__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS	(1024 - BOOT_USER_PGD_PTRS)
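/*
 * For illustration, assuming the usual 4KiB pages (PAGE_SHIFT == 12), a
 * 32-bit virtual address decomposes under this layout as:
 *
 *	 31        22 21        12 11          0
 *	+------------+------------+-------------+
 *	| PGD index  | PTE index  | page offset |
 *	+------------+------------+-------------+
 *	   10 bits      10 bits      12 bits
 */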
#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif
/*
 * Unfortunately, due to the way the MMU works on the MN10300, the vmalloc VM
 * area has to be in the lower half of the virtual address range (the upper
 * half is not translated through the TLB).
 *
 * So in this case, the vmalloc area goes at the bottom of the address map
 * (leaving a hole at the very bottom to catch addressing errors), and
 * userspace starts immediately above.
 *
 * The vmalloc() routines also leave a hole of 4kB between each vmalloced
 * area to catch addressing errors.
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	(0x70000000)
#define VMALLOC_END	(0x7C000000)
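/*
 * That makes the vmalloc area 0x7C000000 - 0x70000000 = 0x0C000000 bytes
 * (192MiB), i.e. 0x0C000000 >> 12 == 49152 4KiB pages, which is what
 * dimensions the kernel_vmalloc_ptes[] array below.
 */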
#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
/* IPTEL/DPTEL bit assignments */
#define _PAGE_BIT_VALID		xPTEL_V_BIT
#define _PAGE_BIT_ACCESSED	xPTEL_UNUSED1_BIT	/* mustn't be loaded into IPTEL/DPTEL */
#define _PAGE_BIT_NX		xPTEL_UNUSED2_BIT	/* mustn't be loaded into IPTEL/DPTEL */
#define _PAGE_BIT_CACHE		xPTEL_C_BIT
#define _PAGE_BIT_PRESENT	xPTEL_PV_BIT
#define _PAGE_BIT_DIRTY		xPTEL_D_BIT
#define _PAGE_BIT_GLOBAL	xPTEL_G_BIT
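/*
 * The _PAGE_BIT_* values above are bit numbers, for use with the bitops
 * (see ptep_mkdirty() and the ptep_test_and_clear_*() helpers below); the
 * unsuffixed _PAGE_* values below (e.g. _PAGE_VALID, _PAGE_DIRTY) are the
 * corresponding bit masks.
 */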
#define _PAGE_VALID		xPTEL_V
#define _PAGE_ACCESSED		xPTEL_UNUSED1
#define _PAGE_NX		xPTEL_UNUSED2	/* no-execute bit */
#define _PAGE_CACHE		xPTEL_C
#define _PAGE_PRESENT		xPTEL_PV
#define _PAGE_DIRTY		xPTEL_D
#define _PAGE_PROT		xPTEL_PR
#define _PAGE_PROT_RKNU		xPTEL_PR_ROK
#define _PAGE_PROT_WKNU		xPTEL_PR_RWK
#define _PAGE_PROT_RKRU		xPTEL_PR_ROK_ROU
#define _PAGE_PROT_WKRU		xPTEL_PR_RWK_ROU
#define _PAGE_PROT_WKWU		xPTEL_PR_RWK_RWU
#define _PAGE_GLOBAL		xPTEL_G
#define _PAGE_PSE		xPTEL_PS_4Mb	/* 4MB page */

#define _PAGE_FILE		xPTEL_UNUSED1_BIT	/* set:pagecache unset:swap */
#define __PAGE_PROT_UWAUX	0x040
#define __PAGE_PROT_USER	0x080
#define __PAGE_PROT_WRITE	0x100
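/*
 * Judging by the accessors further down, these decompose the protection
 * field: __PAGE_PROT_USER grants userspace read access, __PAGE_PROT_WRITE
 * grants kernel write access, and __PAGE_PROT_UWAUX additionally grants
 * userspace write access (it is only set when the other two are both set).
 */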
#define _PAGE_PRESENTV		(_PAGE_PRESENT|_PAGE_VALID)
#define _PAGE_PROTNONE		0x000	/* If not present */
#ifndef __ASSEMBLY__

#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define _PAGE_TABLE	(_PAGE_PRESENTV | _PAGE_PROT_WKNU | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_NONE	(_PAGE_PRESENTV | _PAGE_PROT_RKNU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_SHARED	(_PAGE_PRESENTV | _PAGE_PROT_WKWU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_COPY	(_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_READONLY	(_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)

#define PAGE_NONE		__pgprot(__PAGE_NONE | _PAGE_NX)
#define PAGE_SHARED_NOEXEC	__pgprot(__PAGE_SHARED | _PAGE_NX)
#define PAGE_COPY_NOEXEC	__pgprot(__PAGE_COPY | _PAGE_NX)
#define PAGE_READONLY_NOEXEC	__pgprot(__PAGE_READONLY | _PAGE_NX)
#define PAGE_SHARED_EXEC	__pgprot(__PAGE_SHARED)
#define PAGE_COPY_EXEC		__pgprot(__PAGE_COPY)
#define PAGE_READONLY_EXEC	__pgprot(__PAGE_READONLY)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		PAGE_READONLY_NOEXEC
#define PAGE_SHARED		PAGE_SHARED_EXEC
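/*
 * Note that PAGE_COPY is the copy-on-write variant: private writable
 * mappings start out read-only (see __P011 below) and are only made
 * writable by the page fault handler on the first write.
 */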
#define __PAGE_KERNEL_BASE	(_PAGE_PRESENTV | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)

#define __PAGE_KERNEL		(__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_NOCACHE	(__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_NX)
#define __PAGE_KERNEL_EXEC	(__PAGE_KERNEL & ~_PAGE_NX)
#define __PAGE_KERNEL_RO	(__PAGE_KERNEL_BASE | _PAGE_PROT_RKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_LARGE	(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
/*
 * Whilst the MN10300 can do page protection for execute (given separate data
 * and insn TLBs), we are not supporting it at the moment. Write permission,
 * however, always implies read permission (but not execute permission).
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_NOEXEC
#define __P010	PAGE_COPY_NOEXEC
#define __P011	PAGE_COPY_NOEXEC
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_NOEXEC
#define __S010	PAGE_SHARED_NOEXEC
#define __S011	PAGE_SHARED_NOEXEC
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
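/*
 * In the tables above the three digits of __Pxwr/__Sxwr are the VM_EXEC,
 * VM_WRITE and VM_READ permission bits respectively; the __P* entries
 * cover private (copy-on-write) mappings and the __S* entries shared ones.
 */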
/*
 * Define this to warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA
#define pte_present(x)	(pte_val(x) & _PAGE_VALID)
#define pte_clear(mm, addr, xp)				\
do {							\
	set_pte_at((mm), (addr), (xp), __pte(0));	\
} while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(!pmd_none(x))
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)

#define pages_to_mb(x)	((x) >> (20 - PAGE_SHIFT))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_WRITE; }
static inline int pte_special(pte_t pte) { return 0; }
/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX);
	return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_NX;
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
	return pte;
}
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_NX; return pte; }
static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_USER;
	if (pte_write(pte))
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_WRITE;
	if (pte_val(pte) & __PAGE_PROT_USER)
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
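/*
 * Userspace write access is carried by the extra __PAGE_PROT_UWAUX bit:
 * pte_mkread() and pte_mkwrite() only set it once the page is both
 * user-readable and writable, and pte_rdprotect()/pte_wrprotect() clear
 * it again along with their primary bit.
 */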
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))
279 * The "pgd_xxx()" functions here are trivial for a folded two-level
280 * setup: the pgd is never bad, and a pmd always exists (as it's folded
281 * into the pgd entry)
283 #define pgd_clear(xp) do { } while (0)
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			(*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval)	set_pte((ptep), (pteval))
#define set_pte_atomic(pteptr, pteval)		set_pte((pteptr), (pteval))
/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)
#define ptep_get_and_clear(mm, addr, ptep) \
	__pte(xchg(&(ptep)->pte, 0))
#define pte_same(a, b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_none(x)		(!pte_val(x))
#define pte_pfn(x)		((unsigned long) (pte_val(x) >> PAGE_SHIFT))
#define __pfn_addr(pfn)		((pfn) << PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(__pfn_addr(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(__pfn_addr(pfn) | pgprot_val(prot))
/*
 * All present user pages are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte);
}
/*
 * All present pages are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return 1;
}
/*
 * Bits 0 and 1 are taken, split up the 29 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS	29

#define pte_to_pgoff(pte)	(pte_val(pte) >> 2)
#define pgoff_to_pte(off)	__pte((off) << 2 | _PAGE_FILE)
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 2) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		__pte((x).val)
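/*
 * Worked example of the encoding above: bits 2-7 hold the swap type
 * (up to 64 swap areas) and bits 8-31 the swap offset, leaving bits 0-1
 * clear so that pte_present() is false. For instance, __swp_entry(3, 0x100)
 * yields the value (3 << 2) | (0x100 << 8) == 0x1000c.
 */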
static inline
int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}
static inline
int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}
static inline
void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
}
static inline void ptep_mkdirty(pte_t *ptep)
{
	set_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}
/*
 * Macro to mark a page protection value as "uncacheable". On processors which
 * do not support it, this is a no-op. (Note: cached kernel mappings above set
 * _PAGE_CACHE, so "uncacheable" means clearing that bit, not setting it.)
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) \
	((entry).pte |= _PAGE_PRESENT | _PAGE_PSE | _PAGE_VALID)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}
#define page_pte(page)	page_pte_prot((page), __pgprot(0))

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
	 (_PAGE_PSE | _PAGE_PRESENT))
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
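/* e.g., illustratively, pgd_index(0xC0123456) == 0xC0123456 >> 22 == 768 */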
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
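/*
 * An illustrative (not kernel-API-exact) walk of the kernel page tables
 * using just the helpers in this file, exploiting the fact that the pmd
 * level is folded into the pgd:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pte_t *pte = pte_offset_kernel((pmd_t *) pgd, addr);
 *	if (pte_present(*pte))
 *		... the page frame is pte_page(*pte) ...
 */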
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
static inline int set_kernel_exec(unsigned long vaddr, int enable)
{
	return 0;
}
#define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
#define pte_unmap(pte)		do {} while (0)
#define pte_unmap_nested(pte)	do {} while (0)
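/*
 * As the page tables live in the direct mapping here, pte_offset_map()
 * needs no kmap_atomic() and pte_unmap() has nothing to undo.
 */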
/*
 * The MN10300 has external MMU info in the form of a TLB: this is loaded
 * with the necessary information from the kernel page tables by
 * tlb-mn10300.S
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);
#endif /* !__ASSEMBLY__ */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)
#ifndef __ASSEMBLY__

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */