/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "as-layout.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

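/*
 * Host VM ops (mmap/munmap/mprotect) are collected in an array of
 * struct host_vm_op and pushed to the host through the do_ops
 * callback.  Each add_* helper below first tries to extend the most
 * recent op when the new request is contiguous and compatible with
 * it, so a run of pages turns into a single host call.
 */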
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_op *ops, int *index,
                    int last_filled, union mm_context *mmu, void **flush,
                    int (*do_ops)(union mm_context *, struct host_vm_op *,
                                  int, int, void **))
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if(*index != -1){
                last = &ops[*index];
                if((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)){
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MMAP,
                                               .u = { .mmap = {
                                                       .addr = virt,
                                                       .len = len,
                                                       .prot = prot,
                                                       .fd = fd,
                                                       .offset = offset } } });
        return ret;
}

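/*
 * Queue an unmap of [addr, addr + len), merging with a contiguous
 * preceding MUNMAP op when possible.
 */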
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_op *ops, int *index, int last_filled,
                      union mm_context *mmu, void **flush,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)){
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
                                               .u = { .munmap = {
                                                       .addr = addr,
                                                       .len = len } } });
        return ret;
}

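/*
 * Queue a protection change on [addr, addr + len), merging with a
 * contiguous preceding MPROTECT op that sets the same prot.
 */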
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_op *ops, int *index,
                        int last_filled, union mm_context *mmu, void **flush,
                        int (*do_ops)(union mm_context *, struct host_vm_op *,
                                      int, int, void **))
{
        struct host_vm_op *last;
        int ret = 0;

        if(*index != -1){
                last = &ops[*index];
                if((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.prot == prot)){
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if(*index == last_filled){
                ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
                *index = -1;
        }

        ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
                                               .u = { .mprotect = {
                                                       .addr = addr,
                                                       .len = len,
                                                       .prot = prot } } });
        return ret;
}

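/* The next inc-aligned boundary strictly above n; inc must be a power of two. */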
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

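/*
 * Walk the PTEs covered by one PMD entry, emitting the ops needed to
 * bring the host mappings up to date: accessed/dirty state is folded
 * into the protection bits, new pages are mapped or unmapped, and
 * pure permission changes become mprotects.
 */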
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pte_t *pte;
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte)) {
                        w = 0;
                }
                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if(force || pte_newpage(*pte)){
                        if(pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, prot, ops, op_index,
                                               last_op, mmu, flush, do_ops);
                        else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
                                              last_op, mmu, flush, do_ops);
                }
                else if(pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, ops,
                                           op_index, last_op, mmu, flush,
                                           do_ops);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
        return ret;
}

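/*
 * Walk the PMDs under one PUD entry: an absent entry becomes a single
 * munmap of its whole range, a present one recurses into its PTEs.
 */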
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if(!pmd_present(*pmd)){
                        if(force || pmd_newpage(*pmd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pmd++, addr = next, ((addr != end) && !ret));
        return ret;
}

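/*
 * Same one level up: walk the PUDs under one PGD entry, unmapping
 * absent ranges and recursing into present ones.
 */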
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end, struct host_vm_op *ops,
                                   int last_op, int *op_index, int force,
                                   union mm_context *mmu, void **flush,
                                   int (*do_ops)(union mm_context *,
                                                 struct host_vm_op *, int, int,
                                                 void **))
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if(!pud_present(*pud)){
                        if(force || pud_newpage(*pud)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 op_index, last_op, mmu,
                                                 flush, do_ops);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, ops, last_op,
                                            op_index, force, mmu, flush,
                                            do_ops);
        } while (pud++, addr = next, ((addr != end) && !ret));
        return ret;
}

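/*
 * Sync the host with the page tables over [start_addr, end_addr),
 * batching ops through do_ops and flushing the final partial batch at
 * the end.  A failure leaves the address space inconsistent, so the
 * current process is killed.
 */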
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force,
                      int (*do_ops)(union mm_context *, struct host_vm_op *,
                                    int, int, void **))
{
        pgd_t *pgd;
        union mm_context *mmu = &mm->context;
        struct host_vm_op ops[1];
        unsigned long addr = start_addr, next;
        int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
        void *flush = NULL;

        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if(!pgd_present(*pgd)){
                        if (force || pgd_newpage(*pgd)){
                                ret = add_munmap(addr, next - addr, ops,
                                                 &op_index, last_op, mmu,
                                                 &flush, do_ops);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, ops, last_op,
                                            &op_index, force, mmu, &flush,
                                            do_ops);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));

        if(!ret)
                ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

        /* This is not an else because ret is modified above */
        if(ret){
                printk("fix_range_common: failed, killing current process\n");
                force_sig(SIGKILL, current);
        }
}

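/*
 * The kernel-range variant walks init_mm and applies each change to
 * the host immediately rather than batching; it returns whether any
 * host mapping was changed.
 */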
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for(addr = start; addr < end;){
                pgd = pgd_offset(mm, addr);
                if(!pgd_present(*pgd)){
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if(last > end)
                                last = end;
                        if(pgd_newpage(*pgd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if(!pud_present(*pud)){
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if(last > end)
                                last = end;
                        if(pud_newpage(*pud)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if(!pmd_present(*pmd)){
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if(last > end)
                                last = end;
                        if(pmd_newpage(*pmd)){
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if(err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if(!pte_present(*pte) || pte_newpage(*pte)){
                        updated = 1;
                        err = os_unmap_memory((void *) addr, PAGE_SIZE);
                        if(err < 0)
                                panic("munmap failed, errno = %d\n", -err);
                        if(pte_present(*pte))
                                map_memory(addr, pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if(pte_newprot(*pte)){
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return(updated);
}

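/* Wrappers so code outside the kernel proper can do page table lookups. */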
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return(pte_offset_kernel(pmd, address));
}

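/* Find the PTE that maps addr in a task's address space. */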
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return(pte_offset_map(pmd, addr));
}

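/*
 * The entry points below dispatch to the tt- or skas-mode
 * implementation via CHOOSE_MODE.
 */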
void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
                         flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
        CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
                    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
        CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
                         end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
        CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}