/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range                  free_pgd_range

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

#define hugetlb_prefault_arch_hook(mm)          do { } while (0)

int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
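
/*
 * Convert a normal pte to the segment table entry (ste) format; a huge
 * page is mapped by a segment table entry rather than by a page table
 * entry.
 */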
static inline pte_t pte_mkhuge(pte_t pte)
{
        /*
         * PROT_NONE needs to be remapped from the pte type to the ste type.
         * The HW invalid bit is also different for pte and ste. The pte
         * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
         * bit, so we don't have to clear it.
         */
        if (pte_val(pte) & _PAGE_INVALID) {
                if (pte_val(pte) & _PAGE_SWT)
                        pte_val(pte) |= _HPAGE_TYPE_NONE;
                pte_val(pte) |= _SEGMENT_ENTRY_INV;
        }
        /*
         * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
         * table entry.
         */
        pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
        /*
         * Also set the change-override bit because we don't need dirty bit
         * tracking for hugetlbfs pages.
         */
        pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
        return pte;
}
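
/*
 * Note: setting _PAGE_RO write protects the ste as well, since the
 * protection bit happens to be at the same position in the pte and
 * the segment table entry.
 */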
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        pte_val(pte) |= _PAGE_RO;
        return pte;
}

static inline int huge_pte_none(pte_t pte)
{
        return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
               !(pte_val(pte) & _SEGMENT_ENTRY_RO);
}
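
/*
 * Without hardware large page support (!MACHINE_HAS_HPAGE) huge pages
 * are emulated: the segment table entry still points to a page table,
 * so the effective pte is read through _SEGMENT_ENTRY_ORIGIN and
 * combined with the invalid/read-only state of the segment entry.
 */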
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        pte_t pte = *ptep;
        unsigned long mask;

        if (!MACHINE_HAS_HPAGE) {
                ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
                if (ptep) {
                        mask = pte_val(pte) &
                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                        pte = pte_mkhuge(*ptep);
                        pte_val(pte) |= mask;
                }
        }
        return pte;
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        pte_t pte = huge_ptep_get(ptep);

        pmd_clear((pmd_t *) ptep);
        return pte;
}
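
/*
 * Invalidate the segment table entry with compare-and-swap-and-purge
 * (csp), which also purges the relevant TLB entries. Used on machines
 * that do not provide the IDTE facility.
 */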
static inline void __pmd_csp(pmd_t *pmdp)
{
        register unsigned long reg2 asm("2") = pmd_val(*pmdp);
        register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
                                               _SEGMENT_ENTRY_INV;
        register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

        asm volatile(
                "       csp %1,%3"
                : "=m" (*pmdp)
                : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
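
/*
 * Invalidate the segment table entry with invalidate-DAT-table-entry
 * (idte, emitted via .insn because older assemblers may not know the
 * mnemonic). idte takes the segment table origin and the virtual
 * address, allowing a more selective TLB flush than csp.
 */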
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto = (unsigned long) pmdp -
                                pmd_index(address) * sizeof(pmd_t);

        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
                        : "=m" (*pmdp)
                        : "m" (*pmdp), "a" (sto),
                          "a" ((address & HPAGE_MASK))
                );
        }
        pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}
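
/*
 * Flush a huge pte: use idte if the machine has it, otherwise fall
 * back to csp. With the noexec mm context the shadow segment table
 * has to be flushed as well.
 */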
static inline void huge_ptep_invalidate(struct mm_struct *mm,
                                        unsigned long address, pte_t *ptep)
{
        pmd_t *pmdp = (pmd_t *) ptep;

        if (!MACHINE_HAS_IDTE) {
                __pmd_csp(pmdp);
                if (mm->context.noexec) {
                        pmdp = get_shadow_table(pmdp);
                        __pmd_csp(pmdp);
                }
                return;
        }

        __pmd_idte(address, pmdp);
        if (mm->context.noexec) {
                pmdp = get_shadow_table(pmdp);
                __pmd_idte(address, pmdp);
        }
}
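
/*
 * A valid segment table entry must not simply be overwritten; it has
 * to be invalidated first, so changed access flags are applied as an
 * invalidate + set sequence.
 */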
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({                                                                          \
        int __changed = !pte_same(huge_ptep_get(__ptep), __entry);          \
        if (__changed) {                                                    \
                huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);       \
                set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
        }                                                                   \
        __changed;                                                          \
})
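
/*
 * Write protect a huge pte. The entry only has to be invalidated if
 * other users of the mm may hold stale TLB entries; for a mm that is
 * used by a single thread and attached on this CPU only, the
 * read-only entry can simply be stored.
 */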
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                   \
({                                                                      \
        pte_t __pte = huge_ptep_get(__ptep);                            \
        if (pte_write(__pte)) {                                         \
                if (atomic_read(&(__mm)->mm_users) > 1 ||               \
                    (__mm) != current->active_mm)                       \
                        huge_ptep_invalidate(__mm, __addr, __ptep);     \
                set_huge_pte_at(__mm, __addr, __ptep,                   \
                                huge_pte_wrprotect(__pte));             \
        }                                                               \
})

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */