/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * Per-page page-table packing parameters.  On 31-bit a crst table needs
 * an order-1 allocation and a 4K page holds four 1K page tables; on
 * 64-bit a crst table needs an order-2 allocation and a 4K page holds
 * two 2K page tables.  FRAG_MASK covers the page->flags bits that track
 * which fragments of a page are in use (two bits per fragment when
 * noexec shadow tables are enabled), SECOND_HALVES covers only the
 * shadow ("second half") bits so they can be cleared wholesale.
 */
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL
#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL
#endif
39 unsigned long *crst_table_alloc(struct mm_struct
*mm
, int noexec
)
41 struct page
*page
= alloc_pages(GFP_KERNEL
, ALLOC_ORDER
);
47 struct page
*shadow
= alloc_pages(GFP_KERNEL
, ALLOC_ORDER
);
49 __free_pages(page
, ALLOC_ORDER
);
52 page
->index
= page_to_phys(shadow
);
54 spin_lock(&mm
->page_table_lock
);
55 list_add(&page
->lru
, &mm
->context
.crst_list
);
56 spin_unlock(&mm
->page_table_lock
);
57 return (unsigned long *) page_to_phys(page
);
60 void crst_table_free(struct mm_struct
*mm
, unsigned long *table
)
62 unsigned long *shadow
= get_shadow_table(table
);
63 struct page
*page
= virt_to_page(table
);
65 spin_lock(&mm
->page_table_lock
);
67 spin_unlock(&mm
->page_table_lock
);
69 free_pages((unsigned long) shadow
, ALLOC_ORDER
);
70 free_pages((unsigned long) table
, ALLOC_ORDER
);
74 * page table entry allocation/free routines.
76 unsigned long *page_table_alloc(struct mm_struct
*mm
)
82 bits
= mm
->context
.noexec
? 3UL : 1UL;
83 spin_lock(&mm
->page_table_lock
);
85 if (!list_empty(&mm
->context
.pgtable_list
)) {
86 page
= list_first_entry(&mm
->context
.pgtable_list
,
88 if ((page
->flags
& FRAG_MASK
) == ((1UL << TABLES_PER_PAGE
) - 1))
92 spin_unlock(&mm
->page_table_lock
);
93 page
= alloc_page(GFP_KERNEL
|__GFP_REPEAT
);
96 pgtable_page_ctor(page
);
97 page
->flags
&= ~FRAG_MASK
;
98 table
= (unsigned long *) page_to_phys(page
);
99 clear_table(table
, _PAGE_TYPE_EMPTY
, PAGE_SIZE
);
100 spin_lock(&mm
->page_table_lock
);
101 list_add(&page
->lru
, &mm
->context
.pgtable_list
);
103 table
= (unsigned long *) page_to_phys(page
);
104 while (page
->flags
& bits
) {
109 if ((page
->flags
& FRAG_MASK
) == ((1UL << TABLES_PER_PAGE
) - 1))
110 list_move_tail(&page
->lru
, &mm
->context
.pgtable_list
);
111 spin_unlock(&mm
->page_table_lock
);
115 void page_table_free(struct mm_struct
*mm
, unsigned long *table
)
120 bits
= mm
->context
.noexec
? 3UL : 1UL;
121 bits
<<= (__pa(table
) & (PAGE_SIZE
- 1)) / 256 / sizeof(unsigned long);
122 page
= pfn_to_page(__pa(table
) >> PAGE_SHIFT
);
123 spin_lock(&mm
->page_table_lock
);
125 if (page
->flags
& FRAG_MASK
) {
126 /* Page now has some free pgtable fragments. */
127 list_move(&page
->lru
, &mm
->context
.pgtable_list
);
130 /* All fragments of the 4K page have been freed. */
131 list_del(&page
->lru
);
132 spin_unlock(&mm
->page_table_lock
);
134 pgtable_page_dtor(page
);
139 void disable_noexec(struct mm_struct
*mm
, struct task_struct
*tsk
)
143 spin_lock(&mm
->page_table_lock
);
144 /* Free shadow region and segment tables. */
145 list_for_each_entry(page
, &mm
->context
.crst_list
, lru
)
147 free_pages((unsigned long) page
->index
, ALLOC_ORDER
);
150 /* "Free" second halves of page tables. */
151 list_for_each_entry(page
, &mm
->context
.pgtable_list
, lru
)
152 page
->flags
&= ~SECOND_HALVES
;
153 spin_unlock(&mm
->page_table_lock
);
154 mm
->context
.noexec
= 0;