/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL
#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL
#endif
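
/*
 * A 4K page holds TABLES_PER_PAGE page tables of 256 entries each:
 * four 1K tables on 31 bit, two 2K tables on 64 bit.  The low bits
 * of page->flags serve as an allocation bitmap for the fragments,
 * and FRAG_MASK covers all of them.  With the noexec feature each
 * table is paired with a shadow fragment; SECOND_HALVES selects
 * exactly those shadow bits.
 */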
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	spin_lock(&mm->page_table_lock);
	list_add(&page->lru, &mm->context.crst_list);
	spin_unlock(&mm->page_table_lock);
	return (unsigned long *) page_to_phys(page);
}
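
/*
 * crst_table_free undoes crst_table_alloc: it unlinks the page from
 * the per-mm crst_list and releases the shadow table, if one was
 * allocated for noexec, before releasing the table itself.
 */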
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);
	struct page *page = virt_to_page(table);

	spin_lock(&mm->page_table_lock);
	list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}
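
/*
 * crst_table_upgrade grows the address space by stacking a new
 * top-level region table on top of the current one: the old pgd
 * becomes the first entry of the freshly initialized table, so all
 * existing translations stay valid.  The allocation happens without
 * the lock held, hence the recheck and the retry loop.
 */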
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm, mm->context.noexec);
	if (!table)
		return -ENOMEM;
	spin_lock(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		table = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
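
/*
 * crst_table_downgrade is the inverse operation: it peels top-level
 * region tables off the mm until the address space limit is back at
 * the requested value, freeing each detached table.  The TLB is
 * flushed before the tables are unchained.
 */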
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif
/*
 * page table entry allocation/free routines.
 */
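
/*
 * page_table_alloc hands out 256-entry table fragments from 4K
 * pages.  A partially used page sits at the head of pgtable_list;
 * the while loop below scans page->flags for the first clear
 * fragment bit.  With noexec enabled two bits (table plus shadow)
 * are claimed at once, e.g. on 64 bit bits starts as 3UL and a
 * single allocation fills the whole FRAG_MASK, so the page moves
 * straight to the tail of the list.
 */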
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	spin_lock(&mm->page_table_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock(&mm->page_table_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock(&mm->page_table_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock(&mm->page_table_lock);
	return table;
}
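
/*
 * page_table_free recomputes the fragment bits from the table's
 * offset within its page: offset / (256 * sizeof(unsigned long))
 * is the fragment index, so shifting the noexec-aware bit pattern
 * by that index addresses the right slots in page->flags.  The page
 * itself is only returned to the allocator once every fragment bit
 * is clear.
 */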
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = mm->context.noexec ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock(&mm->page_table_lock);
	page->flags &= ~bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
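
/*
 * disable_noexec drops all shadow structures of an mm once the
 * noexec feature is switched off: the shadow region/segment tables
 * hang off page->index of each crst page, and the shadow halves of
 * the page tables are released simply by clearing their fragment
 * bits so they can be handed out again.
 */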
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
	struct page *page;

	spin_lock(&mm->page_table_lock);
	/* Free shadow region and segment tables. */
	list_for_each_entry(page, &mm->context.crst_list, lru)
		if (page->index) {
			free_pages((unsigned long) page->index, ALLOC_ORDER);
			page->index = 0;
		}
	/* "Free" second halves of page tables. */
	list_for_each_entry(page, &mm->context.pgtable_list, lru)
		page->flags &= ~SECOND_HALVES;
	spin_unlock(&mm->page_table_lock);
	mm->context.noexec = 0;
	update_mm(mm, tsk);
}