Import 2.1.33
[davej-history.git] / arch / i386 / mm / fault.c
blob65a4a67d3443763a3a0def0d336ac964ccbfd485
1 /*
2 * linux/arch/i386/mm/fault.c
4 * Copyright (C) 1995 Linus Torvalds
5 */
7 #include <linux/signal.h>
8 #include <linux/sched.h>
9 #include <linux/head.h>
10 #include <linux/kernel.h>
11 #include <linux/errno.h>
12 #include <linux/string.h>
13 #include <linux/types.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
16 #include <linux/mm.h>
17 #include <linux/smp.h>
18 #include <linux/smp_lock.h>
20 #include <asm/system.h>
21 #include <asm/uaccess.h>
22 #include <asm/pgtable.h>
24 extern void die_if_kernel(const char *,struct pt_regs *,long);
26 /*
27 * Ugly, ugly, but the goto's result in better assembly..
28 */
29 /*
30  * Verify that the user-space range [addr, addr+size) is writable,
31  * forcing a copy-on-write break on every page it covers.
32  *
33  * Returns 1 if the whole range is writable, 0 otherwise.  Used by
34  * the uaccess code on CPUs where WP does not fault in kernel mode.
35  *
36  * NOTE(review): this view of the file had all standalone brace and
37  * comment-delimiter lines stripped by the extraction; they are
38  * restored here.  Also drops a stray double semicolon after the
39  * in-loop "goto bad_area".
40  */
41 int __verify_write(const void * addr, unsigned long size)
42 {
43 	struct vm_area_struct * vma;
44 	unsigned long start = (unsigned long) addr;
45 
46 	/* a zero-length write is trivially OK */
47 	if (!size)
48 		return 1;
49 
50 	vma = find_vma(current->mm, start);
51 	if (!vma)
52 		goto bad_area;
53 	if (vma->vm_start > start)
54 		goto check_stack;
55 
56 good_area:
57 	if (!(vma->vm_flags & VM_WRITE))
58 		goto bad_area;
59 	/* convert byte count into an inclusive page count from 'start' */
60 	size--;
61 	size += start & ~PAGE_MASK;
62 	size >>= PAGE_SHIFT;
63 	start &= PAGE_MASK;
64 
65 	for (;;) {
66 		/* break COW on this page (write fault, user mode) */
67 		do_wp_page(current, vma, start, 1);
68 		if (!size)
69 			break;
70 		size--;
71 		start += PAGE_SIZE;
72 		if (start < vma->vm_end)
73 			continue;
74 		/* crossed into the next vma: it must be adjacent and writable */
75 		vma = vma->vm_next;
76 		if (!vma || vma->vm_start != start)
77 			goto bad_area;
78 		if (!(vma->vm_flags & VM_WRITE))
79 			goto bad_area;
80 	}
81 	return 1;
82 
83 check_stack:
84 	/* address is below the vma: only OK if the stack can grow down to it */
85 	if (!(vma->vm_flags & VM_GROWSDOWN))
86 		goto bad_area;
87 	if (expand_stack(vma, start) == 0)
88 		goto good_area;
89 
90 bad_area:
91 	return 0;
92 }
77 /*
78 * This routine handles page faults. It determines the address,
79 * and the problem, and then passes it off to one of the appropriate
80 * routines.
81 *
82 * error_code:
83 *	bit 0 == 0 means no page found, 1 means protection fault
84 *	bit 1 == 0 means read, 1 means write
85 *	bit 2 == 0 means kernel, 1 means user-mode
86 */
87 /*
88  * i386 page-fault entry point (called from the trap gate).
89  *
90  * Reads the faulting address from %cr2, resolves it against the
91  * current mm's vma tree, and either services the fault (do_no_page /
92  * do_wp_page), applies an exception-table fixup, delivers SIGSEGV to
93  * a user task, or oopses on an unhandled kernel fault.
94  *
95  * NOTE(review): this view of the file had all standalone brace and
96  * comment-delimiter lines stripped by the extraction; the function
97  * body below restores them without changing any statement.
98  */
99 asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
100 {
101 	void (*handler)(struct task_struct *,
102 			struct vm_area_struct *,
103 			unsigned long,
104 			int);
105 	struct task_struct *tsk = current;
106 	struct mm_struct *mm = tsk->mm;
107 	struct vm_area_struct * vma;
108 	unsigned long address;
109 	unsigned long page;
110 	unsigned long fixup;
111 	int write;
112 
113 	lock_kernel();
114 
115 	/* get the address */
116 	__asm__("movl %%cr2,%0":"=r" (address));
117 	down(&mm->mmap_sem);
118 	vma = find_vma(mm, address);
119 	if (!vma)
120 		goto bad_area;
121 	if (vma->vm_start <= address)
122 		goto good_area;
123 	if (!(vma->vm_flags & VM_GROWSDOWN))
124 		goto bad_area;
125 	if (error_code & 4) {
126 		/*
127 		 * accessing the stack below %esp is always a bug.
128 		 * The "+ 32" is there due to some instructions (like
129 		 * pusha) doing pre-decrement on the stack and that
130 		 * doesn't show up until later..
131 		 */
132 		if (address + 32 < regs->esp)
133 			goto bad_area;
134 	}
135 	if (expand_stack(vma, address))
136 		goto bad_area;
137 /*
138  * Ok, we have a good vm_area for this memory access, so
139  * we can handle it..
140  */
141 good_area:
142 	write = 0;
143 	handler = do_no_page;
144 	switch (error_code & 3) {
145 		default:	/* 3: write, present */
146 			handler = do_wp_page;
147 #ifdef TEST_VERIFY_AREA
148 			if (regs->cs == KERNEL_CS)
149 				printk("WP fault at %08lx\n", regs->eip);
150 #endif
151 			/* fall through */
152 		case 2:		/* write, not present */
153 			if (!(vma->vm_flags & VM_WRITE))
154 				goto bad_area;
155 			write++;
156 			break;
157 		case 1:		/* read, present */
158 			goto bad_area;
159 		case 0:		/* read, not present */
160 			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
161 				goto bad_area;
162 	}
163 	handler(tsk, vma, address, write);
164 	up(&mm->mmap_sem);
165 	/*
166 	 * Did it hit the DOS screen memory VA from vm86 mode?
167 	 */
168 	if (regs->eflags & VM_MASK) {
169 		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
170 		if (bit < 32)
171 			tsk->tss.screen_bitmap |= 1 << bit;
172 	}
173 	goto out;
174 
175 /*
176  * Something tried to access memory that isn't in our memory map..
177  * Fix it, but check if it's kernel or user first..
178  */
179 bad_area:
180 	up(&mm->mmap_sem);
181 
182 	/* Are we prepared to handle this fault? */
183 	if ((fixup = search_exception_table(regs->eip)) != 0) {
184 		printk(KERN_DEBUG "Exception at [<%lx>] (%lx)\n", regs->eip, fixup);
185 		regs->eip = fixup;
186 		goto out;
187 	}
188 
189 	/* user-mode fault with no vma: deliver SIGSEGV */
190 	if (error_code & 4) {
191 		tsk->tss.cr2 = address;
192 		tsk->tss.error_code = error_code;
193 		tsk->tss.trap_no = 14;
194 		force_sig(SIGSEGV, tsk);
195 		goto out;
196 	}
197 /*
198  * Oops. The kernel tried to access some bad page. We'll have to
199  * terminate things with extreme prejudice.
200  *
201  * First we check if it was the bootup rw-test, though..
202  */
203 	if (wp_works_ok < 0 && address == 0xc0000000 && (error_code & 1)) {
204 		wp_works_ok = 1;
205 		pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
206 		flush_tlb();
207 		goto out;
208 	}
209 	if (address < PAGE_SIZE) {
210 		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
211 		pg0[0] = pte_val(mk_pte(0, PAGE_SHARED));
212 	} else
213 		printk(KERN_ALERT "Unable to handle kernel paging request");
214 	printk(" at virtual address %08lx\n",address);
215 	__asm__("movl %%cr3,%0" : "=r" (page));
216 	printk(KERN_ALERT "current->tss.cr3 = %08lx, %%cr3 = %08lx\n",
217 		tsk->tss.cr3, page);
218 	/* walk the two-level page table by hand for the oops dump */
219 	page = ((unsigned long *) __va(page))[address >> 22];
220 	printk(KERN_ALERT "*pde = %08lx\n", page);
221 	if (page & 1) {
222 		page &= PAGE_MASK;
223 		address &= 0x003ff000;
224 		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
225 		printk(KERN_ALERT "*pte = %08lx\n", page);
226 	}
227 	die_if_kernel("Oops", regs, error_code);
228 	do_exit(SIGKILL);
229 out:
230 	unlock_kernel();
231 }