/*
 *  include/asm-s390/pgtable.h
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Ulrich Weigand (weigand@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */
/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
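
/*
 * Worked example (illustrative only, using the 31 bit values above):
 * a virtual address such as 0x12345678 decomposes as
 *
 *	pgd index   = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1) = 0x123
 *	pte index   = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)  = 0x45
 *	byte offset = addr & (PAGE_SIZE - 1)                     = 0x678
 *
 * matching the pgd_index()/pte_index() macros defined further down.
 */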
#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */
/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
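
/*
 * Worked example (illustrative only, using the 64 bit values above and
 * assuming a 64 byte struct page): VMEM_MAP_END - VMALLOC_END =
 * 0x2000000000 = 128GB of vmemmap space, so VMEM_MAX_PAGES = 2^31
 * struct pages, enough to describe up to 8TB of memory before the
 * VMALLOC_START term caps VMEM_MAX_PFN further at run time.
 */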
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:      Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |  DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002
/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ptep_invalidate to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
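
/*
 * A minimal sketch (illustrative only, not part of this header's API)
 * of how the table above maps to the bit tests used by the
 * pte_none/pte_present/pte_file functions defined further down. The
 * helper name is made up for the example.
 */
static inline const char *__pte_type_sketch(unsigned long v)
{
	/* 1000, 1010, 1100, 1110: invalid bit set, type bit t clear */
	if ((v & _PAGE_INVALID) && !(v & _PAGE_SWT))
		return "none";
	/* 11?1: invalid + protection + type bit t, x used as offset bit */
	if ((v & (_PAGE_RO | _PAGE_INVALID | _PAGE_SWT)) == _PAGE_TYPE_FILE)
		return "file";
	/* 1011: invalid + both type bits, protection bit clear */
	if ((v & (_PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX)) ==
	    _PAGE_TYPE_SWAP)
		return "swap";
	return "present";
}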
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit		    */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit		    */
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
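
/*
 * Illustrative sketch only: the generic mm code selects one of the
 * entries above by indexing a protection map with a vma's
 * execute/write/read bits. The helper below is made up for the
 * example, not the actual implementation; it shows why a private
 * writable mapping starts out read-only (copy-on-write) while a
 * shared one does not.
 */
static inline pgprot_t __vm_prot_sketch(unsigned int xwr, int shared)
{
	switch (xwr & 7) {
	case 0: return shared ? __S000 : __P000;
	case 1: return shared ? __S001 : __P001;
	case 2: return shared ? __S010 : __P010;	/* -w-: RW vs RO */
	case 3: return shared ? __S011 : __P011;	/* -wr: RW vs RO */
	case 4: return shared ? __S100 : __P100;
	case 5: return shared ? __S101 : __P101;
	case 6: return shared ? __S110 : __P110;	/* xw-: EX_RW vs EX_RO */
	default: return shared ? __S111 : __P111;
	}
}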
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */
static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
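
/*
 * Worked example (illustrative only): with mm->context.noexec set, an
 * executable and writable pte (_PAGE_TYPE_EX_RW, 0x002: SWX set, not
 * invalid) is mirrored into the shadow table at ptep + PTRS_PER_PTE as
 * 0x202, i.e. read-only, while a plain _PAGE_TYPE_RW pte (SWX clear)
 * is mirrored as _PAGE_TYPE_EMPTY, so the secondary view carries no
 * mapping at all.
 */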
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}
static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}
static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))
static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}
/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * the relevant functions.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * the relevant functions.
	 */
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)
#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	struct page *page;
	unsigned long *pgste;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without we
	 * can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})
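
/*
 * Usage sketch (illustrative only, mirroring what generic code such as
 * change_pte_range does): because the flush happens inside
 * ptep_get_and_clear whenever the mm might be attached elsewhere, a
 * caller can clear, modify and re-set a pte without an explicit TLB
 * flush of its own. The helper name is made up for the example.
 */
static inline void __modify_pte_sketch(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);	/* flushes if needed */

	pte = pte_wrprotect(pte);		/* e.g. write protect the page */
	set_pte_at(mm, addr, ptep, pte);	/* flush_tlb_range would be a nop */
}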
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
	}								\
})
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})
/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
}
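
/*
 * Usage sketch (illustrative only): callers are expected to combine
 * the two helpers so the storage key's changed bit is transferred to
 * the struct page dirty flag before it is lost, roughly:
 *
 *	if (page_test_dirty(page)) {
 *		page_clear_dirty(page);
 *		set_page_dirty(page);
 *	}
 */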
/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                  offset                            |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
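
/*
 * Round-trip sketch (illustrative only, helper made up for the
 * example): type 3 and offset 0x1234 encode to the pte value 0x91a40f,
 * and the decode expressions above recover both fields, because the
 * swap bits (_PAGE_TYPE_SWAP) and the type field all shift to zero in
 * the >> 11 term.
 */
static inline int __swp_roundtrip_sketch(void)
{
	pte_t pte = mk_swap_pte(3, 0x1234);
	unsigned long v = pte_val(pte);

	return ((v >> 2) & 0x1f) == 3 &&
	       ((v >> 11) | ((v >> 7) & 1)) == 0x1234;
}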
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
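
/*
 * Round-trip sketch (illustrative only, helper made up for the
 * example): the low 7 offset bits live in pte bits 1-7 and the rest
 * above bit 12, so a page offset such as 0x1234 survives
 * pgoff_to_pte()/pte_to_pgoff() unchanged and tests as a file pte.
 */
static inline int __file_pte_roundtrip_sketch(void)
{
	pte_t pte = pgoff_to_pte(0x1234UL);

	return pte_file(pte) && pte_to_pgoff(pte) == 0x1234UL;
}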
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */