[AVR32] Clean up exception handling code
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] arch/avr32/mm/fault.c
blob 146ebdbdc3027b07e9893d7ce374ac6e46148164
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 *   Copyright (C) 1999  Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	struct die_args args = {
		.regs = regs,
		.trapnr = trap,
	};
	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
				    int trap, int sig)
{
	return NOTIFY_DONE;
}
#endif
int exception_trace = 1;

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   6:  Protection fault (instruction access)
 *   15: Protection fault (read access)
 *   16: Protection fault (write access)
 *   20: Page not found (instruction access)
 *   24: Page not found (read access)
 *   28: Page not found (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	int writeaccess;
	long signr;
	int code;
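	/*
	 * Give registered notifiers (e.g. kprobes) a chance to handle
	 * the fault first; NOTIFY_STOP means it has been dealt with.
	 */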
	if (notify_page_fault(DIE_PAGE_FAULT, regs,
			      ecr, SIGSEGV) == NOTIFY_STOP)
		return;
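	/* TLBEAR holds the virtual address that caused the exception. */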
	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;

	signr = SIGSEGV;
	code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;
	local_irq_enable();

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
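	/*
	 * The address lies below the nearest vma; only a stack vma
	 * (VM_GROWSDOWN) may be expanded downwards to cover it.
	 */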
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	code = SEGV_ACCERR;
	writeaccess = 0;
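	/*
	 * Check that the vma actually permits the kind of access that
	 * faulted: execute, read or write, according to the exception
	 * cause.
	 */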
	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
	default:
		panic("Unhandled case %lu in do_page_fault!", ecr);
	}
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
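	/*
	 * handle_mm_fault() does the actual work: a minor fault was
	 * resolved without I/O, a major fault required reading the
	 * page in from the backing store.
	 */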
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		if (exception_trace)
			printk("%s%s[%d]: segfault at %08lx pc %08lx "
			       "sp %08lx ecr %lu\n",
			       is_init(tsk) ? KERN_EMERG : KERN_INFO,
			       tsk->comm, tsk->pid, address, regs->pc,
			       regs->sp, ecr);
		_exception(SIGSEGV, regs, code, address);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
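	/*
	 * No fixup entry: the faulting instruction is not one of the
	 * whitelisted user-access helpers, so this is a genuine kernel
	 * bug.
	 */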
	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
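	/*
	 * Dump the page table entry for the faulting address: PTBR
	 * points to the page global directory, bits 31:22 of the
	 * address index the pgd and bits 21:12 index the pte.
	 */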
	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(" pte = %08lx", page);
		}
	}
	printk("\n");
	die("Kernel access of bad area", regs, signr);
	return;
	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
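	/*
	 * Never kill init: back off, give memory a chance to be freed
	 * and retry the fault.
	 */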
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}

	printk("VM: Killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	signr = SIGBUS;
	code = BUS_ADRERR;
	if (!user_mode(regs))
		goto no_context;

	if (exception_trace)
		printk("%s%s[%d]: bus error at %08lx pc %08lx "
		       "sp %08lx ecr %lu\n",
		       is_init(tsk) ? KERN_EMERG : KERN_INFO,
		       tsk->comm, tsk->pid, address, regs->pc,
		       regs->sp, ecr);

	_exception(SIGBUS, regs, BUS_ADRERR, address);
}
asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	dump_dtlb();
	die("Bus Error", regs, SIGKILL);
}
/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{

}
EXPORT_SYMBOL(kernel_map_pages);
#endif