/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
7 #include <linux/signal.h>
8 #include <linux/sched.h>
9 #include <linux/kernel.h>
10 #include <linux/errno.h>
11 #include <linux/string.h>
12 #include <linux/types.h>
13 #include <linux/ptrace.h>
14 #include <linux/mman.h>
16 #include <linux/smp.h>
17 #include <linux/smp_lock.h>
18 #include <linux/interrupt.h>
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
22 #include <asm/pgtable.h>
23 #include <asm/hardirq.h>
/* Fatal-fault reporter, defined in arch/i386/kernel/traps.c. */
extern void die(const char *, struct pt_regs *, long);
28 * Ugly, ugly, but the goto's result in better assembly..
30 int __verify_write(const void * addr
, unsigned long size
)
32 struct vm_area_struct
* vma
;
33 unsigned long start
= (unsigned long) addr
;
38 vma
= find_vma(current
->mm
, start
);
41 if (vma
->vm_start
> start
)
45 if (!(vma
->vm_flags
& VM_WRITE
))
48 size
+= start
& ~PAGE_MASK
;
53 if (handle_mm_fault(current
, vma
, start
, 1) <= 0)
59 if (start
< vma
->vm_end
)
62 if (!vma
|| vma
->vm_start
!= start
)
64 if (!(vma
->vm_flags
& VM_WRITE
))
70 if (!(vma
->vm_flags
& VM_GROWSDOWN
))
72 if (expand_stack(vma
, start
) == 0)
79 asmlinkage
void do_invalid_op(struct pt_regs
*, unsigned long);
80 extern unsigned long idt
;
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 */
92 asmlinkage
void do_page_fault(struct pt_regs
*regs
, unsigned long error_code
)
94 struct task_struct
*tsk
;
96 struct vm_area_struct
* vma
;
97 unsigned long address
;
102 /* get the address */
103 __asm__("movl %%cr2,%0":"=r" (address
));
109 * If we're in an interrupt or have no user
110 * context, we must not take the fault..
112 if (in_interrupt() || !mm
)
117 vma
= find_vma(mm
, address
);
120 if (vma
->vm_start
<= address
)
122 if (!(vma
->vm_flags
& VM_GROWSDOWN
))
124 if (error_code
& 4) {
126 * accessing the stack below %esp is always a bug.
127 * The "+ 32" is there due to some instructions (like
128 * pusha) doing post-decrement on the stack and that
129 * doesn't show up until later..
131 if (address
+ 32 < regs
->esp
)
134 if (expand_stack(vma
, address
))
137 * Ok, we have a good vm_area for this memory access, so
142 switch (error_code
& 3) {
143 default: /* 3: write, present */
144 #ifdef TEST_VERIFY_AREA
145 if (regs
->cs
== KERNEL_CS
)
146 printk("WP fault at %08lx\n", regs
->eip
);
149 case 2: /* write, not present */
150 if (!(vma
->vm_flags
& VM_WRITE
))
154 case 1: /* read, present */
156 case 0: /* read, not present */
157 if (!(vma
->vm_flags
& (VM_READ
| VM_EXEC
)))
162 * If for any reason at all we couldn't handle the fault,
163 * make sure we exit gracefully rather than endlessly redo
167 int fault
= handle_mm_fault(tsk
, vma
, address
, write
);
175 * Did it hit the DOS screen memory VA from vm86 mode?
177 if (regs
->eflags
& VM_MASK
) {
178 unsigned long bit
= (address
- 0xA0000) >> PAGE_SHIFT
;
180 tsk
->thread
.screen_bitmap
|= 1 << bit
;
186 * Something tried to access memory that isn't in our memory map..
187 * Fix it, but check if it's kernel or user first..
192 /* User mode accesses just cause a SIGSEGV */
193 if (error_code
& 4) {
194 tsk
->thread
.cr2
= address
;
195 tsk
->thread
.error_code
= error_code
;
196 tsk
->thread
.trap_no
= 14;
197 force_sig(SIGSEGV
, tsk
);
202 * Pentium F0 0F C7 C8 bug workaround.
204 if (boot_cpu_data
.f00f_bug
) {
207 nr
= (address
- idt
) >> 3;
210 do_invalid_op(regs
, 0);
216 /* Are we prepared to handle this kernel fault? */
217 if ((fixup
= search_exception_table(regs
->eip
)) != 0) {
223 * Oops. The kernel tried to access some bad page. We'll have to
224 * terminate things with extreme prejudice.
226 * First we check if it was the bootup rw-test, though..
228 if (boot_cpu_data
.wp_works_ok
< 0 &&
229 address
== PAGE_OFFSET
&& (error_code
& 1)) {
230 boot_cpu_data
.wp_works_ok
= 1;
231 pg0
[0] = pte_val(mk_pte(PAGE_OFFSET
, PAGE_KERNEL
));
234 * Beware: Black magic here. The printk is needed here to flush
235 * CPU state on certain buggy processors.
241 if (address
< PAGE_SIZE
)
242 printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference");
244 printk(KERN_ALERT
"Unable to handle kernel paging request");
245 printk(" at virtual address %08lx\n",address
);
246 printk(" printing eip:\n");
247 printk("%08lx\n", regs
->eip
);
248 __asm__("movl %%cr3,%0" : "=r" (page
));
249 printk(KERN_ALERT
"current->thread.cr3 = %08lx, %%cr3 = %08lx\n",
250 tsk
->thread
.cr3
, page
);
251 page
= ((unsigned long *) __va(page
))[address
>> 22];
252 printk(KERN_ALERT
"*pde = %08lx\n", page
);
255 address
&= 0x003ff000;
256 page
= ((unsigned long *) __va(page
))[address
>> PAGE_SHIFT
];
257 printk(KERN_ALERT
"*pte = %08lx\n", page
);
259 die("Oops", regs
, error_code
);
263 * We ran out of memory, or some other thing happened to us that made
264 * us unable to handle the page fault gracefully.
268 printk("VM: killing process %s\n", tsk
->comm
);
277 * Send a sigbus, regardless of whether we were in kernel
280 tsk
->thread
.cr2
= address
;
281 tsk
->thread
.error_code
= error_code
;
282 tsk
->thread
.trap_no
= 14;
283 force_sig(SIGBUS
, tsk
);
285 /* Kernel mode? Handle exceptions or die */
286 if (!(error_code
& 4))