[PATCH] uml: Use ARRAY_SIZE more assiduously
arch/um/kernel/tlb.c
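Per its title, the patch converts open-coded array-size arithmetic to the
kernel's ARRAY_SIZE() macro, visible below in fix_range_common()'s
"last_op = ARRAY_SIZE(ops) - 1". As a reminder of the idiom, a minimal
sketch, assuming the era's linux/kernel.h definition:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	struct host_vm_op ops[1];
	int last_op = ARRAY_SIZE(ops) - 1;	/* index of the last slot, 0 here */

Compared with writing the sizeof quotient by hand at each use, the macro is
self-documenting and stays correct if the array's type or length changes.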
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if(*index != -1){
		last = &ops[*index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if(*index != -1){
		last = &ops[*index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if(*index == last_filled){
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type	= MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .r	= r,
						       .w	= w,
						       .x	= x } } });
	return ret;
}
/* Round n up to the next inc boundary (inc must be a power of two).
 * An already-aligned n advances a full inc, which guarantees forward
 * progress in the loops below.
 */
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
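/*
 * Scan the page tables covering [start_addr, end_addr) and queue host
 * ops for anything marked out of date: munmap for ranges whose
 * pgd/pud/pmd is absent, mmap or munmap for changed pages, and
 * mprotect for pages that only changed protections.  Leftover ops are
 * submitted with a final do_ops call; on failure the current process
 * is killed with SIGKILL.
 */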
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = ARRAY_SIZE(ops) - 1;
	int ret = 0;

	if(mm == NULL)
		return;

	ops[0].type = NONE;
	for(addr = start_addr; addr < end_addr && !ret;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				ret = add_munmap(addr, end - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		if (!pte_young(*npte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*npte)) {
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				ret = add_mmap(addr,
					       pte_val(*npte) & PAGE_MASK,
					       PAGE_SIZE, r, w, x, ops,
					       &op_index, last_op, mmu,
					       &flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops,
					      &op_index, last_op, mmu,
					      &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			ret = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
					   &op_index, last_op, mmu,
					   &flush, do_ops);
		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}

	if(!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if(ret) {
		printk("fix_range_common: failed, killing current process\n");
		force_sig(SIGKILL, current);
	}
}
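/*
 * Bring the host's view of the kernel mappings in [start, end) back in
 * sync with init_mm's page tables: unmap ranges that lost their page
 * table backing, remap pages flagged as new, and re-apply protections
 * where only the flags changed.  Returns nonzero if any host mapping
 * was modified.
 */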
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}
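/*
 * Trivial non-static wrappers around the generic page table walkers,
 * plus addr_pte(), which looks up the pte for an address in a given
 * task's address space.
 */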
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}