/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;
	int si_code;
	int fault;
	siginfo_t info;

	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	/* Notify kgdb of bus error before doing anything else */
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
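
	/*
	 * Default to "address not mapped": si_code is only switched to
	 * SEGV_ACCERR further down, once a vma covering the address has
	 * been found but the access permission check fails.
	 */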

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;
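
		/*
		 * pgd points into the live page table that the MMU walks
		 * (its base comes from the TTB register), while pgd_k points
		 * into swapper_pg_dir, the kernel's reference table that
		 * vmalloc/ioremap update; missing entries are copied over
		 * level by level below.
		 */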

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}
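
		/*
		 * At each level a fault is genuine when the live entry is
		 * already present (a missed sync cannot be the cause) or
		 * when even the reference entry is absent (the address was
		 * never mapped in the kernel tables).
		 */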

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
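
	/*
	 * From here on we are in process context with a valid mm, so it
	 * is safe to take mmap_sem and to sleep in handle_mm_fault().
	 */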
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
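
	/*
	 * Note the GROWSDOWN handling above: a fault below vm_start is
	 * still valid for a stack vma, and expand_stack() grows the vma
	 * downwards to cover the faulting address before we continue.
	 */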
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
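
	/*
	 * handle_mm_fault() returns a bitmask: VM_FAULT_ERROR covers the
	 * fatal results (VM_FAULT_OOM, VM_FAULT_SIGBUS), and VM_FAULT_MAJOR
	 * marks faults that had to wait on the backing store.
	 */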
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
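
	/*
	 * Hand-rolled dump of the two-level page table for the oops
	 * report: address >> PGDIR_SHIFT picks the pgd ("pde") slot, and
	 * the 0x003ff000 mask keeps bits 12-21, i.e. the PTE index within
	 * a 4 KiB page-table page holding 1024 entries.
	 */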
	page = (unsigned long)get_TTB();
	if (page) {
		page = ((unsigned long *)page)[address >> PGDIR_SHIFT];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
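
/*
 * In the classic 29-bit SH layout, P3 (0xc0000000-0xdfffffff) is the only
 * kernel segment translated through the page tables; P1/P2 are direct
 * mapped, and P4 is control space apart from the store queue window.
 */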

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl = NULL;
	int ret = 1;
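
	/*
	 * ret defaults to 1, "not handled": the caller then takes the
	 * slow path through do_page_fault(). It is cleared to 0 below
	 * only once a valid pte has been loaded directly.
	 */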

#ifdef CONFIG_SH_KGDB
	/* Notify kgdb of bus error before doing anything else */
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}
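
	/*
	 * Kernel P3 faults (e.g. vmalloc space) are resolved against
	 * init_mm's tables via pgd_offset_k(); mm is set to NULL so the
	 * pte lookup below skips the per-mm page-table lock.
	 */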

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;
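
	/*
	 * For user mappings the checks above ran under the page-table
	 * lock (ptl), so the entry cannot change before set_pte() below;
	 * a miss or protection failure falls back to do_page_fault().
	 */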

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
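
	/*
	 * SH manages dirty/accessed in software: hardware faults rather
	 * than setting the bits, so the pte is marked above and the TLB
	 * entry reloaded below via update_mmu_cache().
	 */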

#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}