/*
 *  linux/arch/alpha/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

#include <linux/sched.h>
#include <linux/kernel.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#undef __EXTERN_INLINE

#include <linux/signal.h>
#include <linux/head.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/uaccess.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);

unsigned long last_asn[NR_CPUS] = { /* gag */
	ASN_FIRST_VERSION +  (0 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (1 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (2 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (3 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (4 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (5 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (6 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (7 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (8 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION +  (9 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (10 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (11 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (12 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (13 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (14 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (15 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (16 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (17 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (18 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (19 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (20 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (21 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (22 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (23 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (24 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (25 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (26 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (27 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (28 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (29 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (30 << WIDTH_HARDWARE_ASN),
	ASN_FIRST_VERSION + (31 << WIDTH_HARDWARE_ASN)
};

unsigned long asn_cache = ASN_FIRST_VERSION;
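
/*
 * Reader's note (an interpretation, not a comment from the original
 * author): each last_asn entry seeds one CPU's ASN space, staggered
 * by (cpu << WIDTH_HARDWARE_ASN) so CPUs can allocate ASNs without
 * stepping on each other; asn_cache is the single allocator that
 * get_new_mmu_context() below actually draws from.
 */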

/*
 * Select a new ASN for a task.
 */
void
get_new_mmu_context(struct task_struct *p, struct mm_struct *mm)
{
	unsigned long asn = asn_cache;

	if ((asn & HARDWARE_ASN_MASK) < MAX_ASN)
		++asn;
	else {
		/* Hardware ASNs exhausted: flush the TLB and start a new
		   version number, so every old context looks stale.  */
		tbiap();
		imb();
		asn = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	asn_cache = asn;
	mm->context = asn;			/* full version + asn */
	p->tss.asn = asn & HARDWARE_ASN_MASK;	/* just asn */
}
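
/*
 * A worked example with made-up widths (the real values live in
 * <asm/mmu_context.h>): suppose the hardware ASN occupies the low 8
 * bits, so HARDWARE_ASN_MASK == 0xff, MAX_ASN == 0xff, and
 * ASN_FIRST_VERSION == 0x10000.  Then:
 *
 *	asn_cache == 0x10004:  version 1, hardware asn 4; the next call
 *		simply hands out 0x10005.
 *	asn_cache == 0x100ff:  the hardware asn field is exhausted, so
 *		the TLB is flushed and the next value is
 *		(0x100ff & ~0xff) + 0x10000 == 0x20000: version 2, asn 0.
 *
 * Because mm->context carries the version bits too, a context from an
 * old version can be detected and given a fresh ASN instead of reusing
 * stale TLB entries.
 */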

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+8 : (r)-10])
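
/*
 * Spelling out the index arithmetic above (derived from the macro, not
 * part of the original source):
 *
 *	dpf_reg(0)  == ((unsigned long *)regs)[0]	$0-$8 at the start
 *							of pt_regs
 *	dpf_reg(9)  == ((unsigned long *)regs)[-7]	$9-$15 in the block
 *							saved just below regs
 *	dpf_reg(16) == ((unsigned long *)regs)[24]	$16-$18, the PAL-saved
 *							argument registers
 *	dpf_reg(19) == ((unsigned long *)regs)[9]	$19-$28, right after $8
 *
 * Only registers a fixup can name ($0-$28) map sensibly; the macro is
 * not meant for $29-$31 (gp, sp, zero).
 */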

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned long fixup;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			/* It was a prefetch: skip the faulting insn.  */
			regs->pc += 4;
			return;
		}
	}
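
	/*
	 * Decoding note (derived from the Alpha instruction format, not
	 * from the original source): bits <25:21> of a memory-format
	 * instruction hold the destination register ra, so the first
	 * test matches ra == $31/$f31.  Bits <31:26> hold the opcode,
	 * and the bits set in 0x30f00001400ul are 10, 12, 32-35, 40 and
	 * 41: opcodes 0x0a (ldbu), 0x0c (ldwu), 0x20-0x23 (ldf, ldg,
	 * lds, ldt) and 0x28-0x29 (ldl, ldq), exactly the loads listed
	 * in the comment above.
	 */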

	down(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}
	handle_mm_fault(current, vma, address, cause > 0);
	up(&mm->mmap_sem);
	return;
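
	/*
	 * Summary of the checks above: cause < 0 (instruction fetch)
	 * requires VM_EXEC; cause == 0 (load) requires VM_READ or
	 * VM_WRITE, since write-only mappings still allow reads here;
	 * cause > 0 (store) requires VM_WRITE, and is also what the
	 * last argument of handle_mm_fault() passes as the write flag.
	 */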

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig(SIGSEGV, current);
		return;
	}

	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_table(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		printk("%s: Exception at [<%lx>] (%lx)\n",
		       current->comm, regs->pc, newpc);
		regs->pc = newpc;
		return;
	}
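
	/*
	 * A sketch of the mechanism, as I read it (the details live in
	 * <asm/uaccess.h>, not in this file): search_exception_table()
	 * looks the faulting pc up in the table where __get_user() and
	 * friends registered their fault-prone instructions, and
	 * fixup_exception() then uses the dpf_reg() accessor to patch
	 * the destination registers of the failed access and returns
	 * the continuation address, which we jump to by rewriting
	 * regs->pc.
	 */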

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
	die_if_kernel("Oops", regs, cause, (unsigned long *)regs - 16);
}