/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the S390, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * S390 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/tasks.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
/* Caches aren't brain-dead on S390. */
#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)
#define flush_dcache_page(page)                 do { } while (0)
#define flush_icache_range(start, end)          do { } while (0)
#define flush_icache_page(vma,pg)               do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT       22
#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
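
/*
 * Worked example (illustrative): with PAGE_SHIFT = 12, PGDIR_SHIFT = 22
 * and PTRS_PER_PGD = 512, a 31-bit virtual address splits as
 *
 *   | pgd index (9 bits) | pte index (10 bits) | page offset (12 bits) |
 *   30                 22 21                 12 11                     0
 *
 * e.g. address 0x01234567 -> pgd index 4, pte index 0x234, offset 0x567.
 */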
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * On the S390, four segment-table entries are combined into one pgd
 * entry, which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    512
/*
 * pgd entries used up by user/kernel:
 */
#define USER_PTRS_PER_PGD  512
#define USER_PGD_PTRS      512
#define KERNEL_PGD_PTRS    512
#define FIRST_USER_PGD_NR  0
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START   (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END     (0x7fffffffL)
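
/*
 * Worked example (illustrative, assuming 64MB of identity-mapped RAM so
 * that high_memory is 0x04000000):
 *
 *   VMALLOC_START = (0x04000000 + 0x00800000) & ~0x007fffff = 0x04800000
 *
 * i.e. the vmalloc area begins 8MB above the end of physical memory,
 * rounded up to an 8MB boundary.
 */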
/*
 * A page-table entry of the S390 has the following format:
 *
 *  |                     PFRA |    |  OS  |
 * 0                          0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A segment-table entry of the S390 has the following format:
 *
 *  |   P-table origin               |  |PTL
 * 0                                 IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Length of the page table ((PTL+1)*16 entries -> up to 256 entries)
 *
 * The segment-table origin of the S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X   Space-Switch event:
 * G   Segment-Invalid Bit:
 * P   Private-Space Bit:
 * S   Storage-Alteration:
 * STL Segment-Table-Length: Length of the segment table ((STL+1)*16 entries -> up to 2048 entries)
 */
#define _PAGE_PRESENT   0x001          /* Software                        */
#define _PAGE_ACCESSED  0x002          /* Software accessed               */
#define _PAGE_DIRTY     0x004          /* Software dirty                  */
#define _PAGE_RO        0x200          /* HW read-only                    */
#define _PAGE_INVALID   0x400          /* HW invalid                      */

#define _PAGE_TABLE_LEN 0xf            /* only full page-tables           */
#define _PAGE_TABLE_COM 0x10           /* common page-table               */
#define _PAGE_TABLE_INV 0x20           /* invalid page-table              */
#define _SEG_PRESENT    0x001          /* Software (overlap with PTL)     */

#define _USER_SEG_TABLE_LEN    0x7f    /* user-segment-table up to 2 GB   */
#define _KERNEL_SEG_TABLE_LEN  0x7f    /* kernel-segment-table up to 2 GB */
/*
 * User and kernel page tables are identical.
 */
#define _PAGE_TABLE     (_PAGE_TABLE_LEN)
#define _KERNPG_TABLE   (_PAGE_TABLE_LEN)

/*
 * The kernel segment-table includes the user segment-table.
 */
#define _SEGMENT_TABLE  (_USER_SEG_TABLE_LEN|0x80000000)
#define _KERNSEG_TABLE  (_KERNEL_SEG_TABLE_LEN)
/*
 * No mapping available.
 */
#define PAGE_NONE       __pgprot(_PAGE_INVALID)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY)
/*
 * The S390 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'.
 *
 * Kernel and user memory accesses are done the same way, so we don't
 * need to verify.
 */
#undef TEST_VERIFY_AREA
/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR    (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK        (~(sizeof(void*)-1))

/* sizeof(void*) == 1 << SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
        ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
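
/*
 * Worked example (illustrative): with PAGE_SHIFT = 12 and
 * SIZEOF_PTR_LOG2 = 2, PAGE_PTR(0x01234567) evaluates to
 *
 *   (0x01234567 >> 10) & ~3 & 0xfff = 0x8d0
 *
 * which is the byte offset of pte slot 0x234 (0x234 * 4 == 0x8d0)
 * within its page-table page.
 */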
/*
 * CR 7 (SPST) and CR 13 (HPST) are set to the user pgdir.
 * The kernel runs in its own, disjoint address space, in
 * primary address space.
 * Copy to/from user is done via access-register mode with
 * access registers set to 0 or 1. For that purpose we need
 * to set up CR 7 with the user pgd.
 */
#define SET_PAGE_DIR(tsk,pgdir)                                               \
do {                                                                          \
        unsigned long __pgdir = (__pa(pgdir) & PAGE_MASK) | _SEGMENT_TABLE;   \
        (tsk)->thread.user_seg = __pgdir;                                     \
        if ((tsk) == current) {                                               \
                __asm__ __volatile__("lctl  7,7,%0": :"m" (__pgdir));         \
                __asm__ __volatile__("lctl  13,13,%0": :"m" (__pgdir));       \
        }                                                                     \
} while (0)
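
/*
 * Usage sketch (an assumption about the callers, not guaranteed by this
 * header): the mm layer would install a new address space roughly as
 *
 *   SET_PAGE_DIR(tsk, mm->pgd);
 *
 * which records the segment-table origin in the thread structure and,
 * if tsk is the current task, loads it into CR 7 and CR 13 right away.
 */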
extern inline int pte_none(pte_t pte)    { return ((pte_val(pte) & (_PAGE_INVALID | _PAGE_RO)) == _PAGE_INVALID); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; }
#define PTE_INIT(x) pte_clear(x)
extern inline int pte_pagenr(pte_t pte)  { return ((unsigned long)(pte_val(pte) >> PAGE_SHIFT)); }

extern inline int pmd_none(pmd_t pmd)    { return pmd_val(pmd) & _PAGE_TABLE_INV; }
extern inline int pmd_bad(pmd_t pmd)     { return (pmd_val(pmd) == 0); }
extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
extern inline void pmd_clear(pmd_t * pmdp)
{
        pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
        pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
        pmd_val(pmdp[2]) = _PAGE_TABLE_INV;
        pmd_val(pmdp[3]) = _PAGE_TABLE_INV;
}
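
/*
 * Note: one folded pgd/pmd entry covers four hardware segment-table
 * entries, each mapping a 256-entry page table: 4 * 256 = 1024 =
 * PTRS_PER_PTE. That is why pmd_clear() invalidates four consecutive
 * segment-table entries.
 */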
288 * The "pgd_xxx()" functions here are trivial for a folded two-level
289 * setup: the pgd is never bad, and a pmd always exists (as it's folded
290 * into the pgd entry)
292 extern inline int pgd_none(pgd_t pgd) { return 0; }
293 extern inline int pgd_bad(pgd_t pgd) { return 0; }
294 extern inline int pgd_present(pgd_t pgd) { return 1; }
295 extern inline void pgd_clear(pgd_t * pgdp) { }
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)   { return !(pte_val(pte) & _PAGE_RO); }
extern inline int pte_dirty(pte_t pte)   { return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)   { return pte_val(pte) & _PAGE_ACCESSED; }
/* who needs that
extern inline int pte_read(pte_t pte)    { return !(pte_val(pte) & _PAGE_INVALID); }
extern inline int pte_exec(pte_t pte)    { return !(pte_val(pte) & _PAGE_INVALID); }
extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_INVALID; return pte; }
extern inline pte_t pte_mkread(pte_t pte)    { pte_val(pte) &= ~_PAGE_INVALID; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)    { pte_val(pte) &= ~_PAGE_INVALID; return pte; }
*/
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RO; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)   { pte_val(pte) &= ~_PAGE_RO; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)   { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)   { pte_val(pte) |= _PAGE_DIRTY; return pte; }
extern inline pte_t pte_mkold(pte_t pte)     { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)   { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) \
({ pte_t __pte; pte_val(__pte) = __pa(((page)-mem_map)<<PAGE_SHIFT) + pgprot_val(pgprot); __pte; })

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })
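
/*
 * Worked example (illustrative): mapping physical page 0x12345000 with
 * kernel protections gives
 *
 *   mk_pte_phys(0x12345000, PAGE_KERNEL)
 *     -> pte_val = 0x12345000 | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY
 *                = 0x12345007
 */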
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot); return pte; }

#define page_address(page)  ((page)->virtual)
#define pte_page(x)         (mem_map+pte_pagenr(x))

#define pmd_page(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/* to find an entry in a page-table-directory */
#define pgd_index(address)  ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

#define __pgd_offset(address) pgd_index(address)

#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
        ((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
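
/*
 * Sketch (illustrative): a full page-table walk with the helpers above
 * looks like
 *
 *   pgd_t *pgd = pgd_offset(mm, address);
 *   pmd_t *pmd = pmd_offset(pgd, address);  (trivial: pmd is folded into pgd)
 *   pte_t *pte = pte_offset(pmd, address);
 *
 * pte_offset() picks the pte slot from bits 21..12 of the address times
 * sizeof(pte_t): (address >> 10) & ((PTRS_PER_PTE-1) << 2).
 */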
/* We don't use a pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
        return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __handle_bad_pmd(pmd_t *pmd);
extern void __handle_bad_pmd_kernel(pmd_t *pmd);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}
/*
 * A page-table entry has only 19 bits for the offset and 7 bits for the
 * type. If bits 0, 20 or 23 are set, a translation specification
 * exception occurs, and it is hard to find out the failing address.
 * Therefore, we zero out these bits.
 */
#define SWP_TYPE(entry)   (((entry).val >> 1) & 0x3f)
#define SWP_OFFSET(entry) (((entry).val >> 12) & 0x7FFFF)
#define SWP_ENTRY(type,offset) ((swp_entry_t) { (((type) << 1) | \
                                                ((offset) << 12) | \
                                                _PAGE_INVALID | _PAGE_RO) \
                                               & 0x7ffff6fe })
#define pte_to_swp_entry(pte)  ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)    ((pte_t) { (x).val })

#define module_map      vmalloc
#define module_unmap    vfree

#endif /* !__ASSEMBLY__ */
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)        (0)
#define kern_addr_valid(addr) (1)

#endif /* _ASM_S390_PGTABLE_H */