/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright 2007 IBM Corp.
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
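/*
 * Set a huge pte. On s390 a huge page is mapped by a segment table (pmd)
 * entry, so pteptr actually points at a pmd entry. Without hardware large
 * page support (!MACHINE_HAS_HPAGE) the segment entry is made to point at
 * the software page table that arch_prepare_hugepage() stored in
 * page[1].index, carrying over the invalid/read-only bits; with noexec
 * enabled the shadow table gets a matching entry.
 */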
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *pteptr, pte_t pteval)
{
        pmd_t *pmdp = (pmd_t *) pteptr;
        pte_t shadow_pteval = pteval;
        unsigned long mask;

        if (!MACHINE_HAS_HPAGE) {
                pteptr = (pte_t *) pte_page(pteval)[1].index;
                mask = pte_val(pteval) &
                                (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
                pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
                if (mm->context.noexec) {
                        pteptr += PTRS_PER_PTE;
                        pte_val(shadow_pteval) =
                                (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
                }
        }

        pmd_val(*pmdp) = pte_val(pteval);
        if (mm->context.noexec) {
                pmdp = get_shadow_table(pmdp);
                pmd_val(*pmdp) = pte_val(shadow_pteval);
        }
}
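/*
 * Prepare a page for use as a huge page. With hardware large page support
 * there is nothing to do. Otherwise the huge page is emulated: a page
 * table covering the whole huge page is allocated from init_mm, filled
 * with ptes mapping the page's physical range, and stashed in
 * page[1].index for set_huge_pte_at() to pick up.
 */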
int arch_prepare_hugepage(struct page *page)
{
        unsigned long addr = page_to_phys(page);
        pte_t pte;
        pte_t *ptep;
        int i;

        if (MACHINE_HAS_HPAGE)
                return 0;

        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
        if (!ptep)
                return -ENOMEM;

        pte = mk_pte(page, PAGE_RW);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
                pte_val(pte) += PAGE_SIZE;
        }
        page[1].index = (unsigned long) ptep;
        return 0;
}
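/*
 * Release the emulation page table allocated by arch_prepare_hugepage(),
 * if any. With hardware large page support none was allocated.
 */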
void arch_release_hugepage(struct page *page)
{
        pte_t *ptep;

        if (MACHINE_HAS_HPAGE)
                return;

        ptep = (pte_t *) page[1].index;
        if (!ptep)
                return;
        page_table_free(&init_mm, (unsigned long *) ptep);
        page[1].index = 0;
}
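/*
 * Huge ptes live in the segment table: huge_pte_alloc() and
 * huge_pte_offset() walk the page table down to the pmd level and return
 * the pmd entry cast to a pte pointer.
 */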
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        pudp = pud_alloc(mm, pgdp, addr);
        if (pudp)
                pmdp = pmd_alloc(mm, pudp, addr);
        return (pte_t *) pmdp;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp = NULL;

        pgdp = pgd_offset(mm, addr);
        if (pgd_present(*pgdp)) {
                pudp = pud_offset(pgdp, addr);
                if (pud_present(*pudp))
                        pmdp = pmd_offset(pudp, addr);
        }
        return (pte_t *) pmdp;
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
        if (!MACHINE_HAS_HPAGE)
                return 0;

        return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}
int pud_huge(pud_t pud)
{
        return 0;
}
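/*
 * Return the struct page of the 4K page within the huge page that maps
 * 'address'. Without hardware large page support this returns NULL.
 */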
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmdp, int write)
{
        struct page *page;

        if (!MACHINE_HAS_HPAGE)
                return NULL;

        page = pmd_page(*pmdp);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}