allow coexistence of N build and AC build.
[tomato.git] release/src-rt-6.x/linux/linux-2.6/arch/sh/mm/fault.c
/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;
	int si_code;
	int fault;
	siginfo_t info;
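
	/*
	 * The fault is taken with interrupts disabled; re-enable them
	 * here, since the slow path below may sleep (mmap_sem, disk I/O).
	 */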
	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
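
	/*
	 * find_vma() returns the first VMA with vm_end > address, so the
	 * faulting address may still lie below vm_start; that is only
	 * acceptable for a stack VMA (VM_GROWSDOWN), which we then try
	 * to expand to cover it.
	 */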
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
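	/*
	 * The mapping exists; from here on, a failure is a protection
	 * error (SEGV_ACCERR) rather than a missing mapping (SEGV_MAPERR).
	 */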
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
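
	/*
	 * Account the fault: VM_FAULT_MAJOR means the fault required
	 * I/O (e.g. reading the page in from disk); a minor fault did not.
	 */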
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
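	/*
	 * Dump the page-table entries for the faulting address. The
	 * 0x003ff000 mask extracts the PTE index (bits 21:12), which
	 * assumes 4 KiB pages with one PGD entry covering 4 MiB.
	 */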
	page = (unsigned long)get_TTB();
	if (page) {
		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}

	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
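	/*
	 * init must not be killed on OOM; yield the CPU and retry
	 * the fault instead.
	 */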
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX	P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl = NULL;
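	/*
	 * A nonzero return makes the low-level TLB-miss handler fall
	 * back to the full do_page_fault() slow path above.
	 */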
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}
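
	/*
	 * Walk the page table by hand; a missing or bad upper-level
	 * entry means there is no PTE to refill the TLB from, so punt
	 * to the slow path.
	 */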
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;
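
	/*
	 * The PTE is present and permits the access. A write to a
	 * read-only PTE (e.g. copy-on-write) was punted above; here we
	 * only need to set the software dirty/accessed bits and reload
	 * the entry into the TLB.
	 */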
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}