/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
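
/*
 * The protection map lives in a small three-level tree (see
 * struct subpage_prot_table and the SBP_* constants in the pgtable
 * headers): low_prot[] points directly to pages of u32 map words for
 * the first 4GB of the address space, while protptrs[] adds one more
 * level of indirection for everything above that.  Each u32 covers
 * one 64k page, i.e. 2 bits for each of its sixteen 4k subpages.
 */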

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(pgd_t *pgd)
{
        struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
        unsigned long i, j, addr;
        u32 **p;

        for (i = 0; i < 4; ++i) {
                if (spt->low_prot[i]) {
                        free_page((unsigned long)spt->low_prot[i]);
                        spt->low_prot[i] = NULL;
                }
        }
        addr = 0;
        for (i = 0; i < 2; ++i) {
                p = spt->protptrs[i];
                if (!p)
                        continue;
                spt->protptrs[i] = NULL;
                for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
                     ++j, addr += PAGE_SIZE)
                        if (p[j])
                                free_page((unsigned long)p[j]);
                free_page((unsigned long)p);
        }
        spt->maxaddr = 0;
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
                             int npages)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                return;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        for (; npages > 0; --npages) {
                pte_update(mm, addr, pte, 0, 0);
                addr += PAGE_SIZE;
                ++pte;
        }
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
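/* This is the map == NULL path of sys_subpage_prot() below. */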
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
        u32 **spm, *spp;
        int i, nw;
        unsigned long next, limit;

        down_write(&mm->mmap_sem);
        limit = addr + len;
        if (limit > spt->maxaddr)
                limit = spt->maxaddr;
        for (; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                if (addr < 0x100000000) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm)
                                continue;
                }
                spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
                if (!spp)
                        continue;
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;

                /* zeroed map words allow any access the ptes allow */
                memset(spp, 0, nw * sizeof(u32));

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        up_write(&mm->mmap_sem);
}

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
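/*
 * For example (illustrative only, and assuming the syscall has been
 * wired up as __NR_subpage_prot): a map word of 0x55555555 has 1 in
 * every 2-bit field, so it makes all sixteen 4k subpages of one
 * 64k-aligned page read-only, and 0xffffffff makes them all
 * inaccessible:
 *
 *      u32 map = 0x55555555;
 *      err = syscall(__NR_subpage_prot, addr, 0x10000, &map);
 */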
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
        struct mm_struct *mm = current->mm;
        struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
        u32 **spm, *spp;
        int i, nw;
        unsigned long next, limit;
        int err;

        /* Check parameters */
        if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
            addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
                return -EINVAL;

        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;

        if (!map) {
                /* Clear out the protection map for the address range */
                subpage_prot_clear(addr, len);
                return 0;
        }

        if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
                return -EFAULT;

        down_write(&mm->mmap_sem);
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                err = -ENOMEM;
                if (addr < 0x100000000) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
                        if (!spm) {
                                spm = (u32 **)get_zeroed_page(GFP_KERNEL);
                                if (!spm)
                                        goto out;
                                spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
                        }
                }
                spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
                spp = *spm;
                if (!spp) {
                        spp = (u32 *)get_zeroed_page(GFP_KERNEL);
                        if (!spp)
                                goto out;
                        *spm = spp;
                }
                spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);
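
                /*
                 * Subpage protection only takes effect once the segment
                 * uses 4k hash pages, so that each 4k subpage gets its
                 * own HPTE with its own permissions; demote the segment
                 * if it is still using 64k pages.
                 */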
                demote_segment_4k(mm, addr);

                /* clip this chunk to the pte page and the requested range */
                i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
                nw = PTRS_PER_PTE - i;
                if (addr + (nw << PAGE_SHIFT) > next)
                        nw = (next - addr) >> PAGE_SHIFT;
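
                /*
                 * Drop mmap_sem while touching the user buffer: the copy
                 * may fault, and the fault handler needs to take mmap_sem.
                 */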
                up_write(&mm->mmap_sem);
                err = -EFAULT;
                if (__copy_from_user(spp, map, nw * sizeof(u32)))
                        goto out2;
                map += nw;
                down_write(&mm->mmap_sem);

                /* now flush any existing HPTEs for the range */
                hpte_flush_range(mm, addr, nw);
        }
        if (limit > spt->maxaddr)
                spt->maxaddr = limit;
        err = 0;
out:
        up_write(&mm->mmap_sem);
out2:
        return err;
}