RT-AC66 3.0.0.4.374.130 core
[tomato.git] release/src-rt-6.x/linux/linux-2.6/arch/um/kernel/skas/tlb.c
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */
#include "linux/stddef.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/mmu.h"
#include "mem_user.h"
#include "mem.h"
#include "skas.h"
#include "os.h"
#include "tlb.h"
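
/*
 * Apply a queued batch of host VM operations (mmap/munmap/mprotect) to
 * the host process backing this mm.  Processing stops at the first
 * operation that fails, and its error is returned.
 */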
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for(i = 0; i <= last && !ret; i++){
		op = &ops[i];
		switch(op->type){
		case MMAP:
			ret = map(&mmu->skas.id, op->u.mmap.addr,
				  op->u.mmap.len, op->u.mmap.prot,
				  op->u.mmap.fd, op->u.mmap.offset, finished,
				  flush);
			break;
		case MUNMAP:
			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
				    op->u.munmap.len, finished, flush);
			break;
		case MPROTECT:
			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, flush);
			break;
		default:
			printk("Unknown op type %d in do_ops\n", op->type);
			break;
		}
	}

	return ret;
}

extern int proc_mm;
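
/*
 * Reconcile [start_addr, end_addr) of this address space with the host
 * mappings.  Without /proc/mm support on the host, the region above
 * CONFIG_STUB_START holds the syscall stub pages and must be left
 * untouched, so the range is clipped below it.
 */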
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if(!proc_mm && (end_addr > CONFIG_STUB_START))
		end_addr = CONFIG_STUB_START;

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
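
/* Flush a single page of the kernel address range. */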
void __flush_tlb_one_skas(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
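
/*
 * Range flush: a NULL vm_mm denotes a kernel mapping, which takes the
 * common kernel-range path; user ranges go through fix_range() without
 * forcing.
 */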
void flush_tlb_range_skas(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	if(vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
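
/* Flush the entire address space of an mm. */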
void flush_tlb_mm_skas(struct mm_struct *mm)
{
	unsigned long end;

	/* Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if(atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, end, 0);
}
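
/*
 * Force a full resync of every VMA of the current process (force = 1),
 * regardless of what the page tables claim has changed.
 */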
void force_flush_all_skas(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while(vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}
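
/*
 * Bring a single page's host mapping in line with its pte.  Accessed and
 * dirty bits are emulated: a page not yet marked young loses all access
 * and a clean page loses write access, so the resulting faults let those
 * bits be set.  Any failure kills the process, whose memory is then in
 * an unknown state.
 */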
void flush_tlb_page_skas(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	pgd = pgd_offset(mm, address);
	if(!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if(!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if(!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.skas.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if(pte_newpage(*pte)){
		if(pte_present(*pte)){
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if(pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if(err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

 kill:
	printk("Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}