/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>

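/*
 * Debug helper: dump the first bytes of code at the faulting pc.
 * __get_user() is used so that reading an unmapped pc fails cleanly
 * instead of faulting again.
 */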
static void dump_code(unsigned long pc)
{
	unsigned char *p = (unsigned char *)pc;
	unsigned char val;
	int i;

	printk(KERN_DEBUG "Code:");
	for (i = 0; i < 16; i++) {
		if (__get_user(val, p + i))
			break;
		printk(" %02x", val);
	}
	printk("\n");
}

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.trapnr = trap,
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	return NOTIFY_DONE;
}
#endif
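
/*
 * Illustrative sketch only (not part of this file): how a debugging
 * module might hook the notifier chain above. The callback and
 * notifier_block names are hypothetical.
 */
#if 0
static int example_pf_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		pr_debug("page fault notification, pc = %08lx\n",
			 args->regs->pc);
	return NOTIFY_DONE;
}

static struct notifier_block example_pf_nb = {
	.notifier_call	= example_pf_notify,
};

/* ... register_page_fault_notifier(&example_pf_nb); ... */
#endif
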
/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   5:  Page not found (instruction access)
 *   6:  Protection fault (instruction access)
 *   12: Page not found (read access)
 *   13: Page not found (write access)
 *   14: Protection fault (read access)
 *   15: Protection fault (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	int writeaccess = 0;

	if (notify_page_fault(DIE_PAGE_FAULT, regs,
			      ecr, SIGSEGV) == NOTIFY_STOP)
		return;

	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;

	down_read(&mm->mmap_sem);

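	/*
	 * Look up the VMA containing the faulting address; an address
	 * just below a VM_GROWSDOWN mapping may still be valid if the
	 * stack can be expanded to cover it.
	 */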
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	//pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
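	/*
	 * Check the access type encoded in ecr against the permissions
	 * of this VMA; writeaccess is passed on to handle_mm_fault()
	 * so a write fault can be handled accordingly.
	 */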
	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
	default:
		panic("Unhandled case %lu in do_page_fault!", ecr);
	}

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
		 tsk->comm, tsk->pid, address, ecr);

	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		/* Hmm...we have to pass address and ecr somehow... */
		/* tsk->thread.address = address;
		   tsk->thread.error_code = ecr; */
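		/* Dump the PGD and PTE entries for the faulting address. */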
		page = sysreg_read(PTBR);
		printk("ptbr = %08lx", page);
		if (page) {
			page = ((unsigned long *)page)[address >> 22];
			printk(" pgd = %08lx", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((unsigned long *)__va(page))
					[address >> PAGE_SHIFT];
				printk(" pte = %08lx\n", page);
			}
		}

		pr_debug("Sending SIGSEGV to PID %d...\n", tsk->pid);
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	pr_debug("No context\n");
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		pr_debug("Found fixup at %08lx\n", fixup->fixup);
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);

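	/*
	 * Walk the current page tables for the faulting address:
	 * "address >> 22" is the PGD index (top 10 bits of the virtual
	 * address), "address & 0x003ff000" keeps the 10 bits that index
	 * the page table pointed to by that PGD entry.
	 */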
	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))
				[address >> PAGE_SHIFT];
			printk(" pte = %08lx\n", page);
		}
	}

	die("\nOops", regs, ecr);

	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	printk("Out of memory\n");
	up_read(&mm->mmap_sem);
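	/*
	 * PID 1 (init) is special-cased: instead of failing the fault,
	 * re-take mmap_sem and retry.
	 */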
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: Killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel or
	 * user mode.
	 */
	/* address, error_code, trap_no, ... */
	pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	die("Bus Error", regs, write_access);
}

/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
}
EXPORT_SYMBOL(kernel_map_pages);
#endif