/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */
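
/*
 * Rough sketch of the size arithmetic behind these orders, assuming
 * the usual s390 table geometry: a 31-bit segment table has 2048
 * 4-byte entries, i.e. 2048 * 4 = 8KB = 2 pages, hence order 1 for
 * the pgd; 64-bit region/segment tables have 2048 8-byte entries,
 * i.e. 2048 * 8 = 16KB = 4 pages, hence order 2 for pgd and pmd.
 * A pte table always fits in a single 4KB page (order 0).
 */
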
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	int i;

	if (!pgd)
		return NULL;
	if (s390_noexec) {
		pgd_t *shadow_pgd = (pgd_t *)
			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
		struct page *page = virt_to_page(pgd);

		if (!shadow_pgd) {
			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pgd;
	}
	for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
		pgd_clear(pgd + i);
#endif
	return pgd;
}
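
/*
 * When the machine provides no hardware execute protection
 * (s390_noexec), a second "shadow" table apparently backs each page
 * table; it is remembered in the primary table's struct page via
 * page->lru.next, as done above. The get_shadow_*() helpers used
 * below are defined outside this header and presumably follow that
 * link; see the sketch after pte_alloc_one_kernel().
 */
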
static inline void pgd_free(pgd_t *pgd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);

	if (shadow_pgd)
		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
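
/*
 * On 31 bit, translation is effectively two-level: the pmd level is
 * folded into the pgd, so a pmd is never allocated on its own.
 * pmd_offset() just yields a position within the pgd (as the
 * pmd_clear() loop in pgd_alloc() relies on), which is why the BUG()s
 * above are unreachable.
 */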
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
	int i;

	if (!pmd)
		return NULL;
	if (s390_noexec) {
		pmd_t *shadow_pmd = (pmd_t *)
			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
		struct page *page = virt_to_page(pmd);

		if (!shadow_pmd) {
			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pmd;
	}
	for (i=0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline void pmd_free (pmd_t *pmd)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pmd)
		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

#define __pmd_free_tlb(tlb,pmd)			\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)
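
/*
 * The mmu_gather is flushed before pmd_free() runs so that no CPU can
 * still reach the pmd page through a stale TLB entry once the page is
 * handed back to the allocator.
 */
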
static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
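
/*
 * Worked example of the pmd_populate_kernel() arithmetic above
 * (illustrative): on 31 bit a pte is 4 bytes, so the 4KB pte page
 * holds 1024 entries, while one segment (pmd) entry maps 1MB = 256
 * ptes; the page is thus carved into four 1KB page tables at entry
 * offsets 0/256/512/768. On 64 bit a pte is 8 bytes, so the same page
 * yields two 256-entry 2KB tables, installed via pmd_val() and
 * pmd_val1().
 */
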
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}
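
/*
 * pmd_populate() derives the pte pointer with page_to_phys(); this
 * appears to rely on the kernel identity mapping on s390, under which
 * a physical address is also a usable kernel virtual address.
 */
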
/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	int i;

	if (!pte)
		return NULL;
	if (s390_noexec) {
		pte_t *shadow_pte = (pte_t *)
			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		struct page *page = virt_to_page(pte);

		if (!shadow_pte) {
			free_page((unsigned long) pte);
			return NULL;
		}
		page->lru.next = (void *) shadow_pte;
	}
	for (i=0; i < PTRS_PER_PTE; i++) {
		pte_clear(mm, vmaddr, pte + i);
		vmaddr += PAGE_SIZE;
	}
	return pte;
}
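
/*
 * For illustration only: the real get_shadow_pte() lives outside this
 * header and may differ, but a hypothetical helper along these lines
 * would recover the shadow table stored in page->lru.next above:
 */
#if 0
static inline pte_t *get_shadow_pte(pte_t *ptep)
{
	/* assumes ptep points at the start of its order-0 pte page */
	return (pte_t *) virt_to_page(ptep)->lru.next;
}
#endif
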
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
	if (pte)
		return virt_to_page(pte);
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	pte_t *shadow_pte = get_shadow_pte(pte);

	if (shadow_pte)
		free_page((unsigned long) shadow_pte);
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	struct page *shadow_page = get_shadow_page(pte);

	if (shadow_page)
		__free_page(shadow_page);
	__free_page(pte);
}

#define __pte_free_tlb(tlb, pte)				\
({								\
	struct mmu_gather *__tlb = (tlb);			\
	struct page *__pte = (pte);				\
	struct page *shadow_page = get_shadow_page(__pte);	\
	if (shadow_page)					\
		tlb_remove_page(__tlb, shadow_page);		\
	tlb_remove_page(__tlb, __pte);				\
})
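
/*
 * Unlike pte_free(), this variant hands both the primary and the
 * shadow page to the mmu_gather, so the actual free is deferred until
 * the batched TLB flush has run.
 */
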
#endif /* _S390_PGALLOC_H */