/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#undef __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif

extern void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;

	/* Allocate a fresh ASN (possibly starting a new ASN generation)
	   for this CPU and remember it in both the mm and the thread.  */
	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context = mmc;
	current->thread.asn = mmc & HARDWARE_ASN_MASK;
	/* The thread block wants the page table base as a physical page
	   frame number, so strip the identity-map offset.  */
	current->thread.ptbr
	  = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(&current->thread);
}
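
/* NOTE: __load_new_mm_context() is reached through the activate_mm/
   switch_mm hooks in <asm/mmu_context.h> when a task needs a fresh
   ASN; it appears in this file so that the __EXTERN_INLINE dance
   above emits the out-of-line bodies of those header inlines in
   exactly one object file.  */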

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */
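
/* These arguments arrive from the PALcode through the entMM entry
   point (installed with wrent() at boot): a0 carries the faulting
   virtual address, a1 the MMCSR fault code and a2 the cause, which
   entry.S hands on to us as `address', `mmcsr' and `cause'.  */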

/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])
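
/* The index mapping above follows the entMM stack frame (struct
   pt_regs in <asm/ptrace.h> plus the spill block entry.S pushes just
   below it): $0-$8 occupy slots 0-8 of pt_regs, $19-$28 occupy slots
   9-18, and the PAL-saved $16-$18 occupy slots 26-28, after hae, the
   three trap arguments, ps, pc and gp.  $9-$15 live in the spill
   block just below `regs', hence the negative indices.  */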

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int fixup;
	int fault;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int *)regs->pc);
		/* Bits 25:21 are Ra, the load's target register;
		   0x1f is $31/$f31, the always-zero register.  */
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* Bits 31:26 are the opcode; the mask selects the
		       loads: ldq ldl ldt lds ldg ldf ldwu ldbu.  */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}
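
	/* For instance, "ldq $31,0($19)" encodes as 0xa7f30000:
	   opcode 0x29 (ldq) in bits 31:26 and Ra = 31 in bits 25:21,
	   so 1ul << 0x29 lands on the mask above and the faulting
	   load is skipped as a prefetch hint.  */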

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || in_interrupt())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif

	down(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (cause < 0) {
		/* Instruction fetch: the mapping must be executable.  */
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		/* Store: the mapping must be writable.  */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

/*
 * If for any reason at all we couldn't handle the fault,
 * make sure we exit gracefully rather than endlessly redo
 * the fault.
 */
	/* handle_mm_fault() returns a positive value on success, 0 when
	   the page could not be made present (SIGBUS) and a negative
	   value when we are out of memory; `cause > 0' marks the access
	   as a write.  */
	fault = handle_mm_fault(mm, vma, address, cause > 0);
	up(&mm->mmap_sem);

	if (fault < 0)
		goto out_of_memory;
	if (fault == 0)
		goto do_sigbus;

	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig(SIGSEGV, current);
		return;
	}

no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_table(regs->pc, regs->gp)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
#if 0
		printk("%s: Exception at [<%lx>] (%lx) handled successfully\n",
		       current->comm, regs->pc, newpc);
#endif
		regs->pc = newpc;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long *)regs - 16);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	printk(KERN_ALERT "VM: killing process %s(%d)\n",
	       current->comm, current->pid);
	if (!user_mode(regs))
		goto no_context;
	do_exit(SIGKILL);

do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig(SIGBUS, current);
	if (!user_mode(regs))
		goto no_context;
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
vmalloc_fault:
	if (user_mode(regs)) {
		/* Addresses above TASK_SIZE can never be valid user
		   mappings, so this is a plain bad access.  */
		force_sig(SIGSEGV, current);
		return;
	} else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  The
		   lower levels are shared with swapper_pg_dir, so
		   copying the one pgd entry is enough.  */
		long offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + offset;
		pgd_k = swapper_pg_dir + offset;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}