/*
 * arch/sh/mm/fault.c
 *
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (kprobes_built_in() && !user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
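
/*
 * kprobe_fault_handler() gives a registered kprobe the first shot at a
 * fault taken while its probed instruction was executing (e.g. during
 * out-of-line single-stepping).  Preemption is disabled around the
 * check because kprobe_running() inspects per-CPU state.
 */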

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
                     struct task_struct *tsk)
{
        siginfo_t info;

        info.si_signo   = si_signo;
        info.si_errno   = 0;
        info.si_code    = si_code;
        info.si_addr    = (void __user *)address;

        force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (mm) {
                pgd = mm->pgd;
        } else {
                pgd = get_TTB();

                if (unlikely(!pgd))
                        pgd = swapper_pg_dir;
        }

        printk(KERN_ALERT "pgd = %p\n", pgd);
        pgd += pgd_index(addr);
        printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
               (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        printk("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
                               (u64)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        printk("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
                               (u64)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        printk("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_kernel(pmd, addr);
                printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
                       (u64)pte_val(*pte));
        } while (0);

        printk("\n");
}
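
/*
 * On a two-level configuration, where the pud and pmd levels are
 * folded, a dump from the above looks something like (values are
 * illustrative only):
 *
 *      pgd = c4f1c000
 *      [295f2000] *pgd=04f2d001, *pte=8c123167
 *
 * The *pud/*pmd columns only appear when PTRS_PER_PUD/PTRS_PER_PMD
 * are greater than one.
 */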

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
        unsigned index = pgd_index(address);
        pgd_t *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd += index;
        pgd_k = init_mm.pgd + index;

        if (!pgd_present(*pgd_k))
                return NULL;

        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
                return NULL;

        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (!pmd_present(*pmd_k))
                return NULL;

        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);
        else {
                /*
                 * The page tables are fully synchronised so there must
                 * be another reason for the fault. Return NULL here to
                 * signal that we have not taken care of the fault.
                 */
                BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
                return NULL;
        }

        return pmd_k;
}
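
/*
 * This is the usual lazy vmalloc fixup: new kernel mappings are only
 * installed in the reference page table, init_mm.pgd, and each
 * process page table picks up the covering entries the first time it
 * faults on the area.  A typical case: a driver vmalloc()s a buffer,
 * and a task whose page table predates the allocation then touches
 * it; the fault is repaired here by copying the pud/pmd entries from
 * the reference page table, with no locking required.
 */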

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT      P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT      VMALLOC_END
#endif
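
/*
 * __FAULT_ADDR_LIMIT bounds the window that vmalloc_fault() is
 * willing to repair.  When store queue support is enabled it is
 * widened from VMALLOC_END to P3_ADDR_MAX, presumably so that faults
 * on store queue mappings are fixed up by the same path.
 */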

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd_k;
        pmd_t *pmd_k;
        pte_t *pte_k;

        /* Make sure we are in vmalloc/module/P3 area: */
        if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
                return -1;

        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Do _not_ use "current" here. We might be inside
         * an interrupt in the middle of a task switch..
         */
        pgd_k = get_TTB();
        pmd_k = vmalloc_sync_one(pgd_k, address);
        if (!pmd_k)
                return -1;

        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
                return -1;

        return 0;
}
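
/*
 * A return of 0 means the fault was resolved from the reference page
 * table; -1 means the address is outside the window or genuinely
 * unmapped, and the caller falls through to the normal bad-area
 * handling.
 */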

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
        if (!oops_may_print())
                return;

        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");

        printk(KERN_CONT " at %08lx\n", address);
        printk(KERN_ALERT "PC:");
        printk_address(regs->pc, 1);

        show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
           unsigned long address)
{
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        if (handle_trapped_io(regs, address))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        show_fault_oops(regs, address);

        die("Oops", regs, error_code);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}
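
/*
 * fixup_exception() is what makes faults in copy_from_user() and
 * friends survivable: it looks up regs->pc in the exception table
 * and, on a hit, redirects execution to the recorded fixup address
 * instead of oopsing.  Roughly:
 *
 *      const struct exception_table_entry *fixup;
 *
 *      fixup = search_exception_tables(regs->pc);
 *      if (fixup) {
 *              regs->pc = fixup->fixup;
 *              return 1;
 *      }
 *      return 0;
 */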

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                       unsigned long address, int si_code)
{
        struct task_struct *tsk = current;

        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                /*
                 * It's possible to have interrupts off here:
                 */
                local_irq_enable();

                force_sig_info_fault(SIGSEGV, si_code, address, tsk);

                return;
        }

        no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                     unsigned long address)
{
        __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
           unsigned long address, int si_code)
{
        struct mm_struct *mm = current->mm;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
        up_read(&mm->mmap_sem);

        __bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
                      unsigned long address)
{
        __bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die: */
        if (!user_mode(regs))
                no_context(regs, error_code, address);

        force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
{
        /*
         * Pagefault was interrupted by SIGKILL. We have no reason to
         * continue pagefault.
         */
        if (fatal_signal_pending(current)) {
                if (!(fault & VM_FAULT_RETRY))
                        up_read(&current->mm->mmap_sem);
                if (!user_mode(regs))
                        no_context(regs, error_code, address);
                return 1;
        }

        if (!(fault & VM_FAULT_ERROR))
                return 0;

        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
                if (!user_mode(regs)) {
                        up_read(&current->mm->mmap_sem);
                        no_context(regs, error_code, address);
                        return 1;
                }
                up_read(&current->mm->mmap_sem);

                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed):
                 */
                pagefault_out_of_memory();
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
                else
                        BUG();
        }

        return 1;
}
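
/*
 * A nonzero return means the fault has been fully dealt with here
 * (fatal signal, OOM, or SIGBUS) and do_page_fault() must bail out;
 * zero means a retryable fault that still needs the accounting and
 * retry handling below.
 */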

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
        if (error_code & FAULT_CODE_WRITE) {
                /* write, present and write, not present: */
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        return 1;
                return 0;
        }

        /* ITLB miss on NX page */
        if (unlikely((error_code & FAULT_CODE_ITLB) &&
                     !(vma->vm_flags & VM_EXEC)))
                return 1;

        /* read, not present: */
        if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
                return 1;

        return 0;
}
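
/*
 * The FAULT_CODE_* bits encode what was recorded when the fault was
 * taken: FAULT_CODE_WRITE for a write access and FAULT_CODE_ITLB for
 * an instruction-fetch miss.  The latter is how execute permission is
 * enforced here: an instruction fetch from a vma without VM_EXEC is
 * rejected rather than mapped into the ITLB.
 */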

static int fault_in_kernel_space(unsigned long address)
{
        return address >= TASK_SIZE;
}
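
/*
 * Everything at or above TASK_SIZE is the kernel half of the address
 * space: a fault there can only be a vmalloc/module fault or a kernel
 * bug, never a user mapping.
 */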

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long error_code,
                                        unsigned long address)
{
        unsigned long vec;
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        tsk = current;
        mm = tsk->mm;
        vec = lookup_exception_vector();

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (unlikely(fault_in_kernel_space(address))) {
                if (vmalloc_fault(address) >= 0)
                        return;
                if (notify_page_fault(regs, vec))
                        return;

                bad_area_nosemaphore(regs, error_code, address);
                return;
        }

        if (unlikely(notify_page_fault(regs, vec)))
                return;

        /* Only enable interrupts if they were on before the fault */
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /*
         * If we're in an interrupt, have no user context or are running
         * in an atomic region then we must not take the fault:
         */
        if (unlikely(in_atomic() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }
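
        /*
         * mmap_sem is taken for reading below; on VM_FAULT_RETRY the
         * core fault handler has already dropped it inside
         * __lock_page_or_retry(), so we come back to the retry label
         * and retake it with FAULT_FLAG_TRIED set, which guarantees
         * forward progress on the second pass.
         */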
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (unlikely(!vma)) {
                bad_area(regs, error_code, address);
                return;
        }
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
                bad_area(regs, error_code, address);
                return;
        }
        if (unlikely(expand_stack(vma, address))) {
                bad_area(regs, error_code, address);
                return;
        }

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
        }

        set_thread_fault_code(error_code);

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (error_code & FAULT_CODE_WRITE)
                flags |= FAULT_FLAG_WRITE;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
                if (mm_fault_error(regs, error_code, address, fault))
                        return;

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
}