1 /* $Id: fault.c,v 1.122 2001/11/17 07:19:26 davem Exp $
2 * fault.c: Page fault handlers for the Sparc.
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
11 #include <linux/string.h>
12 #include <linux/types.h>
13 #include <linux/sched.h>
14 #include <linux/ptrace.h>
15 #include <linux/mman.h>
16 #include <linux/threads.h>
17 #include <linux/kernel.h>
18 #include <linux/signal.h>
20 #include <linux/smp.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/kdebug.h>
25 #include <asm/system.h>
27 #include <asm/pgtable.h>
28 #include <asm/memreg.h>
29 #include <asm/openprom.h>
30 #include <asm/oplib.h>
32 #include <asm/traps.h>
33 #include <asm/uaccess.h>
/*
 * Boot-time discovered MMU and Virtual Address Cache (VAC) parameters
 * for sun4c, filled in during early bring-up and read by the fault /
 * cache-flush code.
 *
 * NOTE(review): this listing is line-fragmented and carries the original
 * file's own line numbers inline; jumps in those numbers (35 -> 37 -> 41)
 * show that some source lines (including the closing of the comment that
 * opens at "37 /*") were lost in extraction.  The code text below is
 * preserved byte-for-byte; only this header comment was added.
 */
35 extern int prom_node_root
;
37 /* At boot time we determine these two values necessary for setting
38 * up the segment maps and page table entries (pte's).
41 int num_segmaps
, num_contexts
;
44 /* various Virtual Address Cache parameters we find at boot time... */
46 int vac_size
, vac_linesize
, vac_do_hw_vac_flushes
;
47 int vac_entries_per_context
, vac_entries_per_segment
;
48 int vac_entries_per_page
;
/*
 * probe_memory() - return total physical memory in bytes by summing the
 * num_bytes field of every entry in the sp_banks[] table (the loop stops
 * at the first bank whose num_bytes is zero).
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the jump 51 -> 53 -> 56 -> 57 shows the opening brace, the
 * declaration of `i`, and the trailing `return total;` / closing brace
 * were lost in extraction.  Code text preserved byte-for-byte; only this
 * comment was added.
 */
50 /* Return how much physical memory we have. */
51 unsigned long probe_memory(void)
53 unsigned long total
= 0;
56 for (i
= 0; sp_banks
[i
].num_bytes
; i
++)
57 total
+= sp_banks
[i
].num_bytes
;
62 extern void sun4c_complete_all_stores(void);
/*
 * sparc_lvl15_nmi() - level-15 NMI memory-error handler.  Drains any
 * pending sun4c store buffers via sun4c_complete_all_stores(), then
 * printk-dumps the synchronous/asynchronous error and virtual-address
 * registers plus the memory parity error register.  Presumably it then
 * dumps the trap registers and panics -- TODO confirm; everything after
 * the "REGISTER DUMP:" line was lost in extraction.
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers.  The parameter list is truncated: `avaddr` (printed in the
 * body) was declared on dropped original line 67, and the opening brace
 * is also missing.  Code text preserved byte-for-byte; only this comment
 * was added.
 */
64 /* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
65 asmlinkage
void sparc_lvl15_nmi(struct pt_regs
*regs
, unsigned long serr
,
66 unsigned long svaddr
, unsigned long aerr
,
69 sun4c_complete_all_stores();
70 printk("FAULT: NMI received\n");
71 printk("SREGS: Synchronous Error %08lx\n", serr
);
72 printk("       Synchronous Vaddr  %08lx\n", svaddr
);
73 printk("        Asynchronous Error %08lx\n", aerr
);
74 printk("        Asynchronous Vaddr %08lx\n", avaddr
);
76 printk("        Memory Parity Error %08lx\n", *sun4c_memerr_reg
);
77 printk("REGISTER DUMP:\n");
/*
 * unhandled_fault() - terminal path for a kernel fault we cannot fix up.
 * Declared noreturn (forward prototype first, definition after).  Prints
 * a NULL-pointer-dereference alert when the address is inside the first
 * page, otherwise a "kernel paging request" alert with the address; then
 * dumps the task's mm (or active_mm) context and pgd pointers and calls
 * die_if_kernel("Oops", regs), which does not return here.
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers.  The third parameter (`struct pt_regs *regs`, dropped
 * original line 86), the opening brace, the `printk(KERN_ALERT` that
 * opens the string on original line 90, and the closing brace were lost
 * in extraction.  Code text preserved byte-for-byte; only this comment
 * was added.
 */
82 static void unhandled_fault(unsigned long, struct task_struct
*,
83 struct pt_regs
*) __attribute__ ((noreturn
));
85 static void unhandled_fault(unsigned long address
, struct task_struct
*tsk
,
88 if((unsigned long) address
< PAGE_SIZE
) {
90 "Unable to handle kernel NULL pointer dereference\n");
92 printk(KERN_ALERT
"Unable to handle kernel paging request "
93 "at virtual address %08lx\n", address
);
95 printk(KERN_ALERT
"tsk->{mm,active_mm}->context = %08lx\n",
96 (tsk
->mm
? tsk
->mm
->context
: tsk
->active_mm
->context
));
97 printk(KERN_ALERT
"tsk->{mm,active_mm}->pgd = %08lx\n",
98 (tsk
->mm
? (unsigned long) tsk
->mm
->pgd
:
99 (unsigned long) tsk
->active_mm
->pgd
));
100 die_if_kernel("Oops", regs
);
/*
 * lookup_fault() - fault recovery for the low-level copy routines.
 * Looks up ret_pc in the exception tables (search_extables_range, which
 * also yields a g2 fixup value), then decodes the instruction at pc:
 * bit 21 of a sparc load/store opcode distinguishes store from load, so
 * the _to_-macro case bails out on loads and the _from_-macro case bails
 * out on stores (the `(insn >> 19) & 0x3f == 15` test presumably matches
 * a specific opcode subclass -- TODO confirm against the SPARC ISA).  On
 * an unrecoverable fault it builds a zeroed pt_regs, captures the live
 * PSR via inline asm, and calls unhandled_fault().
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the function's opening brace, local declarations (i, insn,
 * g2, regs), the switch framing around the per-case comments, and the
 * return paths were lost in extraction.  Also "®s" on original
 * lines 137 and 145 is mojibake: an "&regs" whose "&reg" was collapsed
 * into the (R) character by an HTML-entity mis-decode -- restore to
 * "&regs" when repairing the file.  Code text otherwise preserved
 * byte-for-byte; only this comment was added.
 */
103 asmlinkage
int lookup_fault(unsigned long pc
, unsigned long ret_pc
,
104 unsigned long address
)
111 i
= search_extables_range(ret_pc
, &g2
);
114 /* load & store will be handled by fixup */
118 /* store will be handled by fixup, load will bump out */
119 /* for _to_ macros */
120 insn
= *((unsigned int *) pc
);
121 if ((insn
>> 21) & 1)
126 /* load will be handled by fixup, store will bump out */
127 /* for _from_ macros */
128 insn
= *((unsigned int *) pc
);
129 if (!((insn
>> 21) & 1) || ((insn
>>19)&0x3f) == 15)
137 memset(®s
, 0, sizeof (regs
));
140 __asm__
__volatile__(
144 "nop\n" : "=r" (regs
.psr
));
145 unhandled_fault(address
, current
, ®s
);
/*
 * compute_si_addr() - recover the effective data address of the faulting
 * instruction, for filling siginfo.si_addr.  If the trap came from
 * kernel mode (PSR_PS set in regs->psr) the instruction word is read
 * directly from *regs->pc; from user mode it is fetched with
 * __get_user().  The decoded address comes from
 * safe_compute_effective_address(regs, insn).
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the opening brace, the `insn` declaration, the text_fault
 * early-return path (dropped lines 155-160), and the else/closing braces
 * were lost in extraction.  Code text preserved byte-for-byte; only this
 * comment was added.
 */
151 extern unsigned long safe_compute_effective_address(struct pt_regs
*,
154 static unsigned long compute_si_addr(struct pt_regs
*regs
, int text_fault
)
161 if (regs
->psr
& PSR_PS
) {
162 insn
= *(unsigned int *) regs
->pc
;
164 __get_user(insn
, (unsigned int *) regs
->pc
);
167 return safe_compute_effective_address(regs
, insn
);
/*
 * do_sparc_fault() - the main sparc page-fault handler.
 *
 * Visible flow: faults on kernel addresses (>= TASK_SIZE on non-sun4c)
 * divert to the vmalloc synchronization tail; atomic context or a task
 * with no mm cannot take the fault; otherwise mmap_sem is taken for
 * read, the vma covering `address` is found (with VM_GROWSDOWN stack
 * expansion), access rights are checked (VM_WRITE for writes, else
 * VM_READ|VM_EXEC), and handle_mm_fault() resolves the fault, with
 * VM_FAULT_OOM / VM_FAULT_SIGBUS / VM_FAULT_MAJOR dispatched to the
 * respective tails.  The bad_area tail sends SIGSEGV to user mode
 * (si_code SEGV_MAPERR or SEGV_ACCERR set earlier) and for kernel mode
 * consults the exception tables: fixup values > 10 patch pc/npc to the
 * fixup handler (with a special i4/i5 convention for faults inside the
 * __memset / __csum_partial_copy regions), otherwise unhandled_fault()
 * oopses.  The remaining tails kill the task on OOM and raise SIGBUS
 * with BUS_ADRERR; the final section lazily copies the faulting pgd/pmd
 * entries from init_mm's reference page table (vmalloc fault),
 * deliberately lock-free because it may run in interrupt context.
 *
 * NOTE(review): heavily line-fragmented listing with inline original
 * line numbers.  Lost in extraction: the opening brace and several local
 * declarations (info, fault, g2, fixup, pgd/pmd pointers), most goto
 * statements and all label lines except bad_area_nosemaphore, the
 * bodies of several if()s, `/*` openers for the block comments whose
 * interior `*` lines remain, and the #endif matching the
 * DEBUG_EXCEPTIONS #ifdef.  Code text preserved byte-for-byte; only
 * this header comment was added (no interior comments, since comment
 * delimiters are unbalanced below).
 */
170 asmlinkage
void do_sparc_fault(struct pt_regs
*regs
, int text_fault
, int write
,
171 unsigned long address
)
173 struct vm_area_struct
*vma
;
174 struct task_struct
*tsk
= current
;
175 struct mm_struct
*mm
= tsk
->mm
;
179 int from_user
= !(regs
->psr
& PSR_PS
);
186 * We fault-in kernel-space virtual memory on-demand. The
187 * 'reference' page table is init_mm.pgd.
189 * NOTE! We MUST NOT take any locks for this case. We may
190 * be in an interrupt or a critical region, and should
191 * only copy the information from the master page table,
194 if (!ARCH_SUN4C_SUN4
&& address
>= TASK_SIZE
)
197 info
.si_code
= SEGV_MAPERR
;
200 * If we're in an interrupt or have no user
201 * context, we must not take the fault..
203 if (in_atomic() || !mm
)
206 down_read(&mm
->mmap_sem
);
209 * The kernel referencing a bad kernel pointer can lock up
210 * a sun4c machine completely, so we must attempt recovery.
212 if(!from_user
&& address
>= PAGE_OFFSET
)
215 vma
= find_vma(mm
, address
);
218 if(vma
->vm_start
<= address
)
220 if(!(vma
->vm_flags
& VM_GROWSDOWN
))
222 if(expand_stack(vma
, address
))
225 * Ok, we have a good vm_area for this memory access, so
229 info
.si_code
= SEGV_ACCERR
;
231 if(!(vma
->vm_flags
& VM_WRITE
))
234 /* Allow reads even for write-only mappings */
235 if(!(vma
->vm_flags
& (VM_READ
| VM_EXEC
)))
240 * If for any reason at all we couldn't handle the fault,
241 * make sure we exit gracefully rather than endlessly redo
244 fault
= handle_mm_fault(mm
, vma
, address
, write
);
245 if (unlikely(fault
& VM_FAULT_ERROR
)) {
246 if (fault
& VM_FAULT_OOM
)
248 else if (fault
& VM_FAULT_SIGBUS
)
252 if (fault
& VM_FAULT_MAJOR
)
256 up_read(&mm
->mmap_sem
);
260 * Something tried to access memory that isn't in our memory map..
261 * Fix it, but check if it's kernel or user first..
264 up_read(&mm
->mmap_sem
);
266 bad_area_nosemaphore
:
267 /* User mode accesses just cause a SIGSEGV */
270 printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
271 tsk
->comm
, tsk
->pid
, address
, regs
->pc
);
273 info
.si_signo
= SIGSEGV
;
275 /* info.si_code set above to make clear whether
276 this was a SEGV_MAPERR or SEGV_ACCERR fault. */
277 info
.si_addr
= (void __user
*)compute_si_addr(regs
, text_fault
);
279 force_sig_info (SIGSEGV
, &info
, tsk
);
283 /* Is this in ex_table? */
285 g2
= regs
->u_regs
[UREG_G2
];
286 if (!from_user
&& (fixup
= search_extables_range(regs
->pc
, &g2
))) {
287 if (fixup
> 10) { /* Values below are reserved for other things */
288 extern const unsigned __memset_start
[];
289 extern const unsigned __memset_end
[];
290 extern const unsigned __csum_partial_copy_start
[];
291 extern const unsigned __csum_partial_copy_end
[];
293 #ifdef DEBUG_EXCEPTIONS
294 printk("Exception: PC<%08lx> faddr<%08lx>\n", regs
->pc
, address
);
295 printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
296 regs
->pc
, fixup
, g2
);
298 if ((regs
->pc
>= (unsigned long)__memset_start
&&
299 regs
->pc
< (unsigned long)__memset_end
) ||
300 (regs
->pc
>= (unsigned long)__csum_partial_copy_start
&&
301 regs
->pc
< (unsigned long)__csum_partial_copy_end
)) {
302 regs
->u_regs
[UREG_I4
] = address
;
303 regs
->u_regs
[UREG_I5
] = regs
->pc
;
305 regs
->u_regs
[UREG_G2
] = g2
;
307 regs
->npc
= regs
->pc
+ 4;
312 unhandled_fault (address
, tsk
, regs
);
316 * We ran out of memory, or some other thing happened to us that made
317 * us unable to handle the page fault gracefully.
320 up_read(&mm
->mmap_sem
);
321 printk("VM: killing process %s\n", tsk
->comm
);
323 do_group_exit(SIGKILL
);
327 up_read(&mm
->mmap_sem
);
328 info
.si_signo
= SIGBUS
;
330 info
.si_code
= BUS_ADRERR
;
331 info
.si_addr
= (void __user
*) compute_si_addr(regs
, text_fault
);
333 force_sig_info (SIGBUS
, &info
, tsk
);
340 * Synchronize this task's top level page-table
341 * with the 'reference' page table.
343 int offset
= pgd_index(address
);
347 pgd
= tsk
->active_mm
->pgd
+ offset
;
348 pgd_k
= init_mm
.pgd
+ offset
;
350 if (!pgd_present(*pgd
)) {
351 if (!pgd_present(*pgd_k
))
352 goto bad_area_nosemaphore
;
353 pgd_val(*pgd
) = pgd_val(*pgd_k
);
357 pmd
= pmd_offset(pgd
, address
);
358 pmd_k
= pmd_offset(pgd_k
, address
);
360 if (pmd_present(*pmd
) || !pmd_present(*pmd_k
))
361 goto bad_area_nosemaphore
;
/*
 * do_sun4c_fault() - sun4c-specific fast path tried before the generic
 * handler.
 *
 * Visible flow: a user-mode (PSR_PS clear) special case fetches the
 * faulting instruction with get_user() and tests it against the mask
 * 0xc1680000 == 0xc0680000 (presumably matching flush/alternate-space
 * opcodes -- TODO confirm against the SPARC ISA); the oops path
 * re-enters do_sparc_fault() and BUG()s if that returns.  Otherwise the
 * kernel pte for `address` is located via pgd_offset() +
 * sun4c_pte_offset_kernel().  A write fault on a WRITE|PRESENT pte (or
 * a read fault on a READ|PRESENT pte) is serviced in place by setting
 * ACCESSED (and MODIFIED for writes) in the pte and, with IRQs disabled,
 * reloading the hardware translation via sun4c_put_pte() when the
 * segment map entry is valid.  The final "interesting" conditional
 * re-checks validity/writability and calls sun4c_update_mmu_cache() on
 * the owning vma -- safe without mmap_sem per the original comment --
 * before anything unresolved falls through to do_sparc_fault().
 *
 * NOTE(review): heavily line-fragmented listing with inline original
 * line numbers.  Lost in extraction: the opening brace, the
 * declarations of pgdp/ptep/flags, the if() framing around the
 * PSR_PS/user-mode test (its first operand, dropped line ~380, is
 * missing), several return statements and closing braces, the third
 * argument of sun4c_update_mmu_cache(), and `/*` openers for some
 * comment fragments.  Code text preserved byte-for-byte; only this
 * header comment was added.
 */
367 asmlinkage
void do_sun4c_fault(struct pt_regs
*regs
, int text_fault
, int write
,
368 unsigned long address
)
370 extern void sun4c_update_mmu_cache(struct vm_area_struct
*,
371 unsigned long,pte_t
);
372 extern pte_t
*sun4c_pte_offset_kernel(pmd_t
*,unsigned long);
373 struct task_struct
*tsk
= current
;
374 struct mm_struct
*mm
= tsk
->mm
;
381 !(regs
->psr
& PSR_PS
)) {
382 unsigned int insn
, __user
*ip
;
384 ip
= (unsigned int __user
*)regs
->pc
;
385 if (!get_user(insn
, ip
)) {
386 if ((insn
& 0xc1680000) == 0xc0680000)
392 /* We are oopsing. */
393 do_sparc_fault(regs
, text_fault
, write
, address
);
394 BUG(); /* P3 Oops already, you bitch */
397 pgdp
= pgd_offset(mm
, address
);
398 ptep
= sun4c_pte_offset_kernel((pmd_t
*) pgdp
, address
);
400 if (pgd_val(*pgdp
)) {
402 if ((pte_val(*ptep
) & (_SUN4C_PAGE_WRITE
|_SUN4C_PAGE_PRESENT
))
403 == (_SUN4C_PAGE_WRITE
|_SUN4C_PAGE_PRESENT
)) {
406 *ptep
= __pte(pte_val(*ptep
) | _SUN4C_PAGE_ACCESSED
|
407 _SUN4C_PAGE_MODIFIED
|
411 local_irq_save(flags
);
412 if (sun4c_get_segmap(address
) != invalid_segment
) {
413 sun4c_put_pte(address
, pte_val(*ptep
));
414 local_irq_restore(flags
);
417 local_irq_restore(flags
);
420 if ((pte_val(*ptep
) & (_SUN4C_PAGE_READ
|_SUN4C_PAGE_PRESENT
))
421 == (_SUN4C_PAGE_READ
|_SUN4C_PAGE_PRESENT
)) {
424 *ptep
= __pte(pte_val(*ptep
) | _SUN4C_PAGE_ACCESSED
|
427 local_irq_save(flags
);
428 if (sun4c_get_segmap(address
) != invalid_segment
) {
429 sun4c_put_pte(address
, pte_val(*ptep
));
430 local_irq_restore(flags
);
433 local_irq_restore(flags
);
438 /* This conditional is 'interesting'. */
439 if (pgd_val(*pgdp
) && !(write
&& !(pte_val(*ptep
) & _SUN4C_PAGE_WRITE
))
440 && (pte_val(*ptep
) & _SUN4C_PAGE_VALID
))
441 /* Note: It is safe to not grab the MMAP semaphore here because
442 * we know that update_mmu_cache() will not sleep for
443 * any reason (at least not in the current implementation)
444 * and therefore there is no danger of another thread getting
445 * on the CPU and doing a shrink_mmap() on this vma.
447 sun4c_update_mmu_cache (find_vma(current
->mm
, address
), address
,
450 do_sparc_fault(regs
, text_fault
, write
, address
);
/*
 * force_user_fault() - resolve a fault on a user-space address on behalf
 * of the register-window handlers below (no pt_regs available).
 *
 * Visible flow: mirrors the user path of do_sparc_fault() -- take
 * mmap_sem for read, find_vma() with VM_GROWSDOWN stack expansion,
 * check VM_WRITE (writes) or VM_READ|VM_EXEC (reads), then
 * handle_mm_fault(), with a switch dispatching VM_FAULT_SIGBUS to the
 * SIGBUS tail.  The bad_area tail raises SIGSEGV with si_code set
 * earlier (SEGV_MAPERR or SEGV_ACCERR) and si_addr = address; the
 * do_sigbus tail raises SIGBUS/BUS_ADRERR.  Both "whee" printks are
 * presumably debug-only (dropped #if framing) -- TODO confirm.
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers.  Lost in extraction: the opening brace, the `siginfo_t info`
 * declaration, all goto statements and label lines, the OOM switch case,
 * and the closing braces/returns.  Code text preserved byte-for-byte;
 * only this header comment was added.
 */
453 /* This always deals with user addresses. */
454 inline void force_user_fault(unsigned long address
, int write
)
456 struct vm_area_struct
*vma
;
457 struct task_struct
*tsk
= current
;
458 struct mm_struct
*mm
= tsk
->mm
;
461 info
.si_code
= SEGV_MAPERR
;
464 printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
465 tsk
->pid
, write
, address
);
467 down_read(&mm
->mmap_sem
);
468 vma
= find_vma(mm
, address
);
471 if(vma
->vm_start
<= address
)
473 if(!(vma
->vm_flags
& VM_GROWSDOWN
))
475 if(expand_stack(vma
, address
))
478 info
.si_code
= SEGV_ACCERR
;
480 if(!(vma
->vm_flags
& VM_WRITE
))
483 if(!(vma
->vm_flags
& (VM_READ
| VM_EXEC
)))
486 switch (handle_mm_fault(mm
, vma
, address
, write
)) {
487 case VM_FAULT_SIGBUS
:
491 up_read(&mm
->mmap_sem
);
494 up_read(&mm
->mmap_sem
);
496 printk("Window whee %s [%d]: segfaults at %08lx\n",
497 tsk
->comm
, tsk
->pid
, address
);
499 info
.si_signo
= SIGSEGV
;
501 /* info.si_code set above to make clear whether
502 this was a SEGV_MAPERR or SEGV_ACCERR fault. */
503 info
.si_addr
= (void __user
*) address
;
505 force_sig_info (SIGSEGV
, &info
, tsk
);
509 up_read(&mm
->mmap_sem
);
510 info
.si_signo
= SIGBUS
;
512 info
.si_code
= BUS_ADRERR
;
513 info
.si_addr
= (void __user
*) address
;
515 force_sig_info (SIGBUS
, &info
, tsk
);
/*
 * window_overflow_fault() - fault in the user stack pages needed to
 * spill a register window.  Takes the stack pointer from the thread's
 * saved window-buffer stack pointers (rwbuf_stkptrs[0]); if the 0x38-byte
 * window save area crosses a page boundary, fault in the second page
 * first, then the page at sp itself, both for write (second argument 1).
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the opening brace, the `unsigned long sp;` declaration, and
 * the closing brace were lost in extraction.  Code text preserved
 * byte-for-byte; only this comment was added.
 */
518 void window_overflow_fault(void)
522 sp
= current_thread_info()->rwbuf_stkptrs
[0];
523 if(((sp
+ 0x38) & PAGE_MASK
) != (sp
& PAGE_MASK
))
524 force_user_fault(sp
+ 0x38, 1);
525 force_user_fault(sp
, 1);
/*
 * window_underflow_fault() - fault in the user stack pages needed to
 * refill a register window from the given stack pointer.  Same
 * page-straddle check as window_overflow_fault(), but the accesses are
 * reads (write argument 0).
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the opening and closing braces were lost in extraction.
 * Code text preserved byte-for-byte; only this comment was added.
 */
528 void window_underflow_fault(unsigned long sp
)
530 if(((sp
+ 0x38) & PAGE_MASK
) != (sp
& PAGE_MASK
))
531 force_user_fault(sp
+ 0x38, 0);
532 force_user_fault(sp
, 0);
/*
 * window_ret_fault() - fault in the user stack pages needed on return
 * when restoring a register window.  The stack pointer is taken from the
 * trapping frame's frame-pointer register (regs->u_regs[UREG_FP]);
 * accesses are reads (write argument 0), with the same 0x38-byte
 * page-straddle handling as the other window fault helpers.
 *
 * NOTE(review): line-fragmented listing with inline original line
 * numbers; the opening brace, the `unsigned long sp;` declaration, and
 * the closing brace were lost in extraction.  Code text preserved
 * byte-for-byte; only this comment was added.
 */
535 void window_ret_fault(struct pt_regs
*regs
)
539 sp
= regs
->u_regs
[UREG_FP
];
540 if(((sp
+ 0x38) & PAGE_MASK
) != (sp
& PAGE_MASK
))
541 force_user_fault(sp
+ 0x38, 0);
542 force_user_fault(sp
, 0);