/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright 2007 IBM Corp.
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
9 #include <linux/hugetlb.h>
/*
 * Install a huge-page mapping.  On s390 a huge pte actually lives in a
 * segment-table (pmd) slot, so the pte_t * handed in by generic code is
 * reinterpreted as a pmd_t *.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;	/* huge ptes are pmd entries here */
	pte_t shadow_pteval = pteval;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/*
		 * No hardware large-page support: emulate the huge page
		 * with the pre-allocated page table that
		 * arch_prepare_hugepage() stashed in page[1].index, and
		 * build a segment-table entry pointing at it.  Preserve
		 * the invalid and read-only bits from the incoming pte.
		 */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		mask = pte_val(pteval) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		if (mm->context.noexec) {
			/* shadow entry uses the second half of the table */
			pteptr += PTRS_PER_PTE;
			pte_val(shadow_pteval) =
					(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		}
	}

	pmd_val(*pmdp) = pte_val(pteval);
	if (mm->context.noexec) {
		/* mirror the entry into the shadow segment table */
		pmdp = get_shadow_table(pmdp);
		pmd_val(*pmdp) = pte_val(shadow_pteval);
	}
}
38 int arch_prepare_hugepage(struct page
*page
)
40 unsigned long addr
= page_to_phys(page
);
45 if (MACHINE_HAS_HPAGE
)
48 ptep
= (pte_t
*) pte_alloc_one(&init_mm
, address
);
52 pte
= mk_pte(page
, PAGE_RW
);
53 for (i
= 0; i
< PTRS_PER_PTE
; i
++) {
54 set_pte_at(&init_mm
, addr
+ i
* PAGE_SIZE
, ptep
+ i
, pte
);
55 pte_val(pte
) += PAGE_SIZE
;
57 page
[1].index
= (unsigned long) ptep
;
/*
 * Release the emulation page table attached to a huge page by
 * arch_prepare_hugepage() via page[1].index.  Nothing to do when
 * hardware large pages are in use (no table was allocated) or when no
 * table was stored.
 */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	pte_free(&init_mm, ptep);
	/* clear the stash so the table cannot be freed twice */
	page[1].index = 0;
}
75 pte_t
*huge_pte_alloc(struct mm_struct
*mm
, unsigned long addr
)
81 pgdp
= pgd_offset(mm
, addr
);
82 pudp
= pud_alloc(mm
, pgdp
, addr
);
84 pmdp
= pmd_alloc(mm
, pudp
, addr
);
85 return (pte_t
*) pmdp
;
88 pte_t
*huge_pte_offset(struct mm_struct
*mm
, unsigned long addr
)
94 pgdp
= pgd_offset(mm
, addr
);
95 if (pgd_present(*pgdp
)) {
96 pudp
= pud_offset(pgdp
, addr
);
97 if (pud_present(*pudp
))
98 pmdp
= pmd_offset(pudp
, addr
);
100 return (pte_t
*) pmdp
;
/*
 * Huge pmd sharing is not implemented on s390; always report that
 * nothing was unshared.
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
/*
 * Following a huge page by bare virtual address is not supported on
 * s390; always fail with -EINVAL.
 */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}
114 int pmd_huge(pmd_t pmd
)
116 if (!MACHINE_HAS_HPAGE
)
119 return !!(pmd_val(pmd
) & _SEGMENT_ENTRY_LARGE
);
122 struct page
*follow_huge_pmd(struct mm_struct
*mm
, unsigned long address
,
123 pmd_t
*pmdp
, int write
)
127 if (!MACHINE_HAS_HPAGE
)
130 page
= pmd_page(*pmdp
);
132 page
+= ((address
& ~HPAGE_MASK
) >> PAGE_SHIFT
);