/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#undef  __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);
/*
 * Force a new ASN for a task.
 */
unsigned long last_asn = ASN_FIRST_VERSION;
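/* A context value packs a version (generation) number in the bits
   above the hardware ASN; when the ASN space wraps, the version is
   bumped, which implicitly invalidates every older mm context (see
   __get_new_mm_context in asm/mmu_context.h).  */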
void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context = mmc;
	current->thread.asn = mmc & HARDWARE_ASN_MASK;
	current->thread.ptbr
	  = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(&current->thread);
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */
/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+8 : (r)-10])
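/* Index 0-8 selects r0-r8 at the start of struct pt_regs; 9-15 turns
   into offsets -7..-1, i.e. the $9-$15 block saved just below `regs'
   (see the comment above); 16-18 and 19-28 pick out the r16-r18 and
   r19-r28 slots further into the frame, per the pt_regs layout.  */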
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned long fixup;
	int fault;
	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			/* Skip the faulting prefetch and continue.  */
			regs->pc += 4;
			return;
		}
	}
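	/* The opcode mask above has one bit set per load listed in the
	   comment: 0x0a (ldbu), 0x0c (ldwu), 0x20-0x23 (ldf, ldg, lds,
	   ldt) and 0x28/0x29 (ldl, ldq).  Bits 21-25 of the instruction
	   are the target register, and 0x1f there is $31.  */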
	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || in_interrupt())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
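	/* expand_stack() grows a VM_GROWSDOWN vma downwards to cover the
	   faulting address; a nonzero return (e.g. the stack rlimit would
	   be exceeded) means this cannot be a legitimate stack access.  */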
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, cause > 0);
	up_read(&mm->mmap_sem);

	if (fault < 0)
		goto out_of_memory;
	if (fault == 0)
		goto do_sigbus;
	return;
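	/* The dispatch above follows this kernel's handle_mm_fault()
	   convention: positive (1 = minor, 2 = major) when the fault was
	   serviced, 0 when it could not be handled (SIGBUS), negative
	   when allocation failed.  */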
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig(SIGSEGV, current);
		return;
	}
no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_table(regs->pc, regs->gp)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		printk("%s: Exception at [<%lx>] (%lx) handled successfully\n",
		       current->comm, regs->pc, newpc);
		regs->pc = newpc;
		return;
	}
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	do_exit(SIGKILL);
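	/* Passing (unsigned long*)regs - 16 hands die_if_kernel() a
	   pointer below the pt_regs frame, presumably so the dump can
	   also show the registers saved underneath it (cf. dpf_reg).  */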
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	printk(KERN_ALERT "VM: killing process %s(%d)\n",
	       current->comm, current->pid);
	if (!user_mode(regs))
		goto no_context;
	do_exit(SIGKILL);

do_sigbus:
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig(SIGBUS, current);
	if (!user_mode(regs))
		goto no_context;
	return;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
vmalloc_fault:
	if (user_mode(regs)) {
		force_sig(SIGSEGV, current);
		return;
	} else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  Copying
		   the one top-level entry is enough: the lower levels of
		   the kernel mappings are shared with swapper_pg_dir, so
		   only this mm's pgd slot can be stale.  */
		long offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + offset;
		pgd_k = swapper_pg_dir + offset;
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}