/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#undef  __EXTERN_INLINE
#include <linux/signal.h>
#include <linux/head.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
#ifdef __SMP__
unsigned long last_asn[NR_CPUS] = { /* gag */
        ASN_FIRST_VERSION + (0 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (1 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (2 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (3 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (4 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (5 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (6 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (7 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (8 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (9 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (10 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (11 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (12 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (13 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (14 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (15 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (16 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (17 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (18 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (19 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (20 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (21 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (22 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (23 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (24 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (25 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (26 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (27 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (28 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (29 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (30 << WIDTH_HARDWARE_ASN),
        ASN_FIRST_VERSION + (31 << WIDTH_HARDWARE_ASN)
};
#else
unsigned long asn_cache = ASN_FIRST_VERSION;
#endif /* __SMP__ */
/*
 * Select a new ASN for a task.
 */
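/*
 * Layout note (informal sketch, read off the assignments below):
 * mm->context holds the full value, a software "version" in the bits
 * above WIDTH_HARDWARE_ASN plus the hardware ASN in the low bits, while
 * p->tss.asn keeps only the hardware part (asn & HARDWARE_ASN_MASK).
 */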
void
get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
        unsigned long asn = asn_cache;

        if ((asn & HARDWARE_ASN_MASK) < MAX_ASN)
                ++asn;
        else {
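                /* No hardware ASNs left: tbiap() invalidates the process
                   TLB entries and imb() syncs the instruction stream, then
                   we move to a new software version so every old context
                   is recognised as stale.  */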
                tbiap();
                imb();
                asn = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        asn_cache = asn;
        mm->context = asn;                      /* full version + asn */
        p->tss.asn = asn & HARDWARE_ASN_MASK;   /* just asn */
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *      0 = translation not valid
 *      1 = access violation
 *      2 = fault-on-read
 *      3 = fault-on-execute
 *      4 = fault-on-write
 *
 * cause:
 *      -1 = instruction fetch
 *      0 = load
 *      1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */
/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r) \
        (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
                                 (r) <= 18 ? (r)+8 : (r)-10])
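/*
 * Rough reading of the index arithmetic (see the register comment above):
 * $0-$8 sit at the start of the pt_regs frame, $9-$15 live in the block
 * saved just before `regs' (hence the negative indices from (r)-16), and
 * the remaining integer registers are reached at fixed offsets further
 * into pt_regs via (r)+8 and (r)-10.
 */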
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
              long cause, struct pt_regs *regs)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        unsigned fixup;
        /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
           (or is suppressed by the PALcode).  Support that for older CPUs
           by ignoring such an instruction.  */
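        /* Decode sketch: on Alpha the opcode lives in bits 26-31 and the
           destination register (Ra) in bits 21-25, so the test below asks
           whether the target is $31/$f31 and the opcode is one of the
           loads listed in the comment.  */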
        if (cause == 0) {
                unsigned int insn;
                __get_user(insn, (unsigned int *)regs->pc);
                if ((insn >> 21 & 0x1f) == 0x1f &&
                    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
                    (1ul << (insn >> 26) & 0x30f00001400ul)) {
                        regs->pc += 4;
                        return;
                }
        }
        down(&mm->mmap_sem);
        lock_kernel();
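        /* find_vma() returns the first region whose end lies above
           `address'; if that region also starts at or below the address
           we are inside it, otherwise the only acceptable case is a
           VM_GROWSDOWN stack we can expand to cover the fault.  */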
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
        } else if (!cause) {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
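        /* cause > 0 means the fault came from a store, so it doubles as
           the write-access argument to handle_mm_fault().  */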
        handle_mm_fault(current, vma, address, cause > 0);
        up(&mm->mmap_sem);
        goto out;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up(&mm->mmap_sem);
        if (user_mode(regs)) {
                force_sig(SIGSEGV, current);
                goto out;
        }
        /* Are we prepared to handle this fault as an exception?  */
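        /* search_exception_table() looks the faulting PC up in the
           kernel's exception table; fixup_exception() then yields the
           recovery PC (and may patch a register through dpf_reg) so that,
           for example, a faulting user-copy access resumes at its fixup
           stub instead of oopsing.  */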
        if ((fixup = search_exception_table(regs->pc)) != 0) {
                unsigned long newpc;
                newpc = fixup_exception(dpf_reg, fixup, regs->pc);
                printk("%s: Exception at [<%lx>] (%lx)\n",
                       current->comm, regs->pc, newpc);
                regs->pc = newpc;
                goto out;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        printk(KERN_ALERT "Unable to handle kernel paging request at "
               "virtual address %016lx\n", address);
        die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
        do_exit(SIGKILL);
out:
        unlock_kernel();
}