/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
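
/*
 * Usage sketch (illustrative only; "my_fault_notify" and "my_nb" are
 * hypothetical names, not part of this file):
 *
 *      static int my_fault_notify(struct notifier_block *nb,
 *                                 unsigned long val, void *data)
 *      {
 *              return NOTIFY_DONE;     (let the fault proceed)
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_fault_notify,
 *      };
 *
 *      register_page_fault_notifier(&my_nb);
 *
 * A handler that returns NOTIFY_STOP claims the fault, and do_page_fault()
 * below bails out early instead of handling it (this is how kprobes hooks
 * in for breakpoint and single-step handling).
 */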
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        struct die_args args = {
                .regs   = regs,
                .trapnr = trap,
        };

        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        return NOTIFY_DONE;
}
#endif

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page;
        int si_code;
        siginfo_t info;

        if (notify_page_fault(DIE_PAGE_FAULT, regs,
                              writeaccess, SIGSEGV) == NOTIFY_STOP)
                return;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        tsk = current;
        mm = tsk->mm;
        si_code = SEGV_MAPERR;

        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (pud_present(*pud) || !pud_present(*pud_k))
                        goto bad_area_nosemaphore;
                set_pud(pud, *pud_k);

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);

                return;
        }
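
        /*
         * Kernel mappings above TASK_SIZE, such as those created by
         * vmalloc(), are entered only into the reference page table
         * (swapper_pg_dir). The block above lazily copies the relevant
         * pgd/pud/pmd entry into the faulting context's page table on
         * first touch; it takes no locks and never dereferences "tsk",
         * so it is safe even from interrupt context.
         */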

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
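
        /*
         * find_vma() returns the first VMA whose end is above "address".
         * If that VMA does not actually contain the address, the access
         * can still be valid when the VMA is a stack (VM_GROWSDOWN) that
         * expand_stack() can grow downward to cover it; anything else is
         * a bad area.
         */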
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }
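
        /*
         * VM_FAULT_MINOR means the fault was satisfied without I/O (the
         * page was already resident, e.g. in the page cache), while
         * VM_FAULT_MAJOR means the page had to be read in; the two are
         * counted separately in the task's fault statistics above.
         */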

        up_read(&mm->mmap_sem);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;
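
        /*
         * fixup_exception() searches the kernel exception table: if the
         * faulting instruction has a registered fixup entry (as the user
         * access helpers do), execution resumes at the fixup address
         * rather than oopsing.
         */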

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
        page = (unsigned long)get_TTB();
        if (page) {
                page = ((unsigned long *)page)[address >> PGDIR_SHIFT];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(KERN_ALERT "*pte = %08lx\n", page);
                }
        }

227 die("Oops", regs
, writeaccess
);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX     (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX     P4SEG
#endif
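
/*
 * P3_ADDR_MAX bounds the highest address that __do_page_fault() below will
 * service as a kernel fault: normally the P3/P4 boundary, but with store
 * queues enabled it extends 0x04000000 bytes (64MB) further to cover the
 * store queue mapping space.
 */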

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        struct mm_struct *mm = current->mm;
        spinlock_t *ptl = NULL;
        int ret = 1;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
                mm = NULL;
        } else {
                if (unlikely(address >= TASK_SIZE || !mm))
                        return 1;

                pgd = pgd_offset(mm, address);
        }
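
        /*
         * Kernel-space (P3) faults are resolved against the reference page
         * table via pgd_offset_k(), with "mm" set to NULL so that no PTE
         * lock is taken below; user-space faults walk the current task's
         * page table under the PTE lock.
         */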

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;

        if (mm)
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else
                pte = pte_offset_kernel(pmd, address);

        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                goto unlock;
        if (unlikely(writeaccess && !pte_write(entry)))
                goto unlock;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);
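
        /*
         * SH tracks the dirty and accessed state of a page in software:
         * a write that misses the TLB ends up here, and the PTE is marked
         * dirty and young before being loaded back into the TLB.
         */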

#ifdef CONFIG_CPU_SH4
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
        ret = 0;
unlock:
        if (mm)
                pte_unmap_unlock(pte, ptl);

        return ret;
}
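
/*
 * A nonzero return value tells the low-level TLB miss handler that this
 * fast path could not resolve the fault, in which case the full
 * do_page_fault() slow path above is taken instead.
 */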