Merge with Linux 2.5.74.
[linux-2.6/linux-mips.git] / arch / sh / mm / fault.c
/* $Id: fault.c,v 1.10 2003/05/04 19:29:54 lethal Exp $
 *
 *  linux/arch/sh/mm/fault.c
 *  Copyright (C) 1999  Niibe Yutaka
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#if defined(CONFIG_SH_KGDB)
#include <asm/kgdb.h>
#endif

extern void die(const char *,struct pt_regs *,long);
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
                              unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page;
        const struct exception_table_entry *fixup;

#if defined(CONFIG_SH_KGDB)
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        tsk = current;
        mm = tsk->mm;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_interrupt() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
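        /*
         * Look up the vma covering the faulting address.  find_vma()
         * returns the first vma that ends above 'address'; if it starts
         * above the address, the fault is valid only when it hits a
         * VM_GROWSDOWN stack vma that expand_stack() can grow downwards
         * to cover it.
         */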
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
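        /* Check that the access type is permitted by the vma's protections. */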
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
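        /*
         * The switch below interprets handle_mm_fault()'s return value:
         * 1 is counted as a minor fault, 2 as a major fault, 0 raises
         * SIGBUS, and anything else is treated as out of memory.
         */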
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case 1:
                tsk->min_flt++;
                break;
        case 2:
                tsk->maj_flt++;
                break;
        case 0:
                goto do_sigbus;
        default:
                goto out_of_memory;
        }

        up_read(&mm->mmap_sem);
        return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

        if (user_mode(regs)) {
                tsk->thread.address = address;
                tsk->thread.error_code = writeaccess;
                force_sig(SIGSEGV, tsk);
                return;
        }
no_context:
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
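        /*
         * Read the page table base from MMU_TTB and walk the page tables
         * by hand so the Oops output can include the offending pde/pte.
         */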
        asm volatile("mov.l %1, %0"
                     : "=r" (page)
                     : "m" (__m(MMU_TTB)));
        if (page) {
                page = ((unsigned long *) page)[address >> 22];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
                        printk(KERN_ALERT "*pte = %08lx\n", page);
                }
        }
        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
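        /* Give init a chance: let other tasks run, then retry the fault. */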
        if (current->pid == 1) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;
do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        tsk->thread.address = address;
        tsk->thread.error_code = writeaccess;
        tsk->thread.trap_no = 14;
        force_sig(SIGBUS, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

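/*
 * __do_page_fault() is the light-weight fault path: it tries to resolve
 * the fault straight from the page tables, without taking mmap_sem.  It
 * returns 0 once the PTE has been reloaded and 1 when the fault cannot be
 * handled here, leaving the rest to the full do_page_fault() path above.
 */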
/*
 * Called with interrupt disabled.
 */
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
                               unsigned long address)
{
        pgd_t *dir;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;

#if defined(CONFIG_SH_KGDB)
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif
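        /*
         * Pick the page table to walk: P3 addresses use the kernel page
         * tables; any other address above TASK_SIZE, or a fault with no
         * user mm, cannot be resolved here.
         */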
        if (address >= P3SEG && address < P4SEG)
                dir = pgd_offset_k(address);
        else if (address >= TASK_SIZE)
                return 1;
        else if (!current->mm)
                return 1;
        else
                dir = pgd_offset(current->mm, address);

        pmd = pmd_offset(dir, address);
        if (pmd_none(*pmd))
                return 1;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return 1;
        }
        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (pte_none(entry) || pte_not_present(entry)
            || (writeaccess && !pte_write(entry)))
                return 1;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);
#if defined(CONFIG_CPU_SH4)
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        __flush_tlb_page(get_asid(), address&PAGE_MASK);
#endif
        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
        return 0;
}

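/*
 * Flush a single page from the TLB.  When the page belongs to an mm other
 * than the current one, that mm's ASID is installed temporarily so the
 * right entry is invalidated, then the previous ASID is restored.
 */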
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
                unsigned long flags;
                unsigned long asid;
                unsigned long saved_asid = MMU_NO_ASID;

                asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
                page &= PAGE_MASK;

                local_irq_save(flags);
                if (vma->vm_mm != current->mm) {
                        saved_asid = get_asid();
                        set_asid(asid);
                }
                __flush_tlb_page(asid, page);
                if (saved_asid != MMU_NO_ASID)
                        set_asid(saved_asid);
                local_irq_restore(flags);
        }
}

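/*
 * Flush a range of user pages.  When the range spans more than a quarter
 * of the TLB, it is cheaper to drop the mm's context (forcing a fresh
 * ASID on the next activation) than to flush the pages one by one.
 */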
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                unsigned long flags;
                int size;

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
                        mm->context = NO_CONTEXT;
                        if (mm == current->mm)
                                activate_context(mm);
                } else {
                        unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
                        unsigned long saved_asid = MMU_NO_ASID;

                        start &= PAGE_MASK;
                        end += (PAGE_SIZE - 1);
                        end &= PAGE_MASK;
                        if (mm != current->mm) {
                                saved_asid = get_asid();
                                set_asid(asid);
                        }
                        while (start < end) {
                                __flush_tlb_page(asid, start);
                                start += PAGE_SIZE;
                        }
                        if (saved_asid != MMU_NO_ASID)
                                set_asid(saved_asid);
                }
                local_irq_restore(flags);
        }
}

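/*
 * Same idea for kernel addresses, using init_mm's ASID; oversized ranges
 * simply flush the whole TLB.
 */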
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
                flush_tlb_all();
        } else {
                unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
                unsigned long saved_asid = get_asid();

                start &= PAGE_MASK;
                end += (PAGE_SIZE - 1);
                end &= PAGE_MASK;
                set_asid(asid);
                while (start < end) {
                        __flush_tlb_page(asid, start);
                        start += PAGE_SIZE;
                }
                set_asid(saved_asid);
        }
        local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        /* Invalidate all TLB of this process. */
        /* Instead of invalidating each TLB, we get new MMU context. */
        if (mm->context != NO_CONTEXT) {
                unsigned long flags;

                local_irq_save(flags);
                mm->context = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                local_irq_restore(flags);
        }
}

void flush_tlb_all(void)
{
        unsigned long flags, status;

        /*
         * Flush all the TLB.
         *
         * Write to the MMU control register's bit:
         *      TF-bit for SH-3, TI-bit for SH-4.
         *      It's the same position, bit #2.
         */
        local_irq_save(flags);
        status = ctrl_inl(MMUCR);
        status |= 0x04;
        ctrl_outl(status, MMUCR);
        local_irq_restore(flags);
}