/*
 *  arch/ppc/mm/fault.c
 *
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
extern void (*debugger)(struct pt_regs *);
extern void (*debugger_fault_handler)(struct pt_regs *);
extern int (*debugger_dabr_match)(struct pt_regs *);
int debugger_kernel_faults = 1;
#endif
unsigned long htab_reloads = 0;   /* updated by head.S:hash_page() */
unsigned long htab_evicts = 0;    /* updated by head.S:hash_page() */
unsigned long pte_misses = 0;     /* updated by do_page_fault() */
unsigned long pte_errors = 0;     /* updated by do_page_fault() */
unsigned int probingmem = 0;
extern void die_if_kernel(char *, struct pt_regs *, long);
void bad_page_fault(struct pt_regs *, unsigned long);
void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
/*
 * The error_code parameter is DSISR for a data fault, SRR1 for
 * an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code)
{
        struct vm_area_struct * vma;
        struct mm_struct *mm = current->mm;
        /*printk("address: %08lx nip:%08lx code: %08lx %s%s%s%s%s%s\n",
               address,regs->nip,error_code,
               (error_code&0x40000000)?"604 tlb&htab miss ":"",
               (error_code&0x20000000)?"603 tlbmiss ":"",
               (error_code&0x02000000)?"write ":"",
               (error_code&0x08000000)?"prot ":"",
               (error_code&0x80000000)?"I/O ":"",
               (regs->trap == 0x400)?"instr":"data"
               );*/
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
        if (debugger_fault_handler && regs->trap == 0x300) {
                debugger_fault_handler(regs);
                return;
        }
        if (error_code & 0x00400000) {
                /* DABR match */
                if (debugger_dabr_match(regs))
                        return;
        }
#endif
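        /* A page fault taken from an interrupt handler is a bug; complain
         * (a bounded number of times) but fall through and try to handle
         * it anyway.
         */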
        if (in_interrupt()) {
                static int complained;
                if (complained < 20) {
                        ++complained;
                        printk("page fault in interrupt handler, addr=%lx\n",
                               address);
                        show_regs(regs);
                }
        }
        if (current == NULL || mm == NULL) {
                bad_page_fault(regs, address);
                return;
        }
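        /* Take the mm semaphore and find the vma covering the faulting
         * address; a VM_GROWSDOWN (stack) vma may be expanded downwards
         * to cover it.
         */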
        down(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
good_area:
#ifdef CONFIG_6xx
        if (error_code & 0x95700000)
                /* an error such as lwarx to I/O controller space,
                   address matching DABR, eciwx, etc. */
#endif /* CONFIG_6xx */
#ifdef CONFIG_8xx
        /* The MPC8xx seems to always set 0x80000000, which is
         * "undefined".  Of those that can be set, this is the only
         * one which seems bad.
         */
        if (error_code & 0x10000000)
                /* Guarded storage error. */
#endif /* CONFIG_8xx */
                /* the goto below is the body of whichever of the two
                 * if statements above was compiled in */
                goto bad_area;
        /* a write */
        if (error_code & 0x02000000) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        /* a read */
        } else {
                /* protection fault */
                if (error_code & 0x08000000)
                        goto bad_area;
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
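        /* The access is permitted by the vma; have the generic mm code
         * create or update the pte, telling it whether this was a write.
         */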
        if (!handle_mm_fault(current, vma, address, error_code & 0x02000000))
                goto bad_area;
        up(&mm->mmap_sem);
        /*
         * keep track of tlb+htab misses that are good addrs but
         * just need pte's created via handle_mm_fault()
         * -- Cort
         */
        pte_misses++;
        return;

bad_area:
        up(&mm->mmap_sem);
        pte_errors++;
        bad_page_fault(regs, address);
}
void
bad_page_fault(struct pt_regs *regs, unsigned long address)
{
        unsigned long fixup;

        if (user_mode(regs)) {
                force_sig(SIGSEGV, current);
                return;
        }

        /* Are we prepared to handle this fault?  */
        if ((fixup = search_exception_table(regs->nip)) != 0) {
                regs->nip = fixup;
                return;
        }

        /* kernel has accessed a bad area */
        show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
        if (debugger_kernel_faults)
                debugger(regs);
#endif
        print_backtrace( (unsigned long *)regs->gpr[1] );
        panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
              regs->nip, regs->link, address, current->comm, current->pid);
}
#ifdef CONFIG_8xx

/*
 * I need a va to pte function for the MPC8xx so I can set the cache
 * attributes on individual pages used by the Communication Processor
 * Module.
 */
pte_t *va_to_pte(struct task_struct *tsk, unsigned long address)
{
        pgd_t *dir;
        pmd_t *pmd;
        pte_t *pte;

        dir = pgd_offset(tsk->mm, address & PAGE_MASK);
        if (dir) {
                pmd = pmd_offset(dir, address & PAGE_MASK);
                if (pmd && pmd_present(*pmd)) {
                        pte = pte_offset(pmd, address & PAGE_MASK);
                        if (pte && pte_present(*pte))
                                return(pte);
                } else {
                        return (0);
                }
        } else {
                return (0);
        }
        return (0);
}
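/*
 * Example (not built): a hypothetical sketch of the kind of use the
 * comment above has in mind -- marking one page cache-inhibited for a
 * CPM buffer.  The _PAGE_NO_CACHE flag is assumed to be defined in
 * asm/pgtable.h; the stale TLB entry would still need flushing.
 */
#if 0
static void example_make_page_uncached(unsigned long va)
{
        pte_t *pte = va_to_pte(current, va);

        if (pte)
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_NO_CACHE));
}
#endif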
unsigned long va_to_phys(unsigned long address)
{
        pgd_t *dir;
        pmd_t *pmd;
        pte_t *pte;

        dir = pgd_offset(current->mm, address & PAGE_MASK);
        if (dir) {
                pmd = pmd_offset(dir, address & PAGE_MASK);
                if (pmd && pmd_present(*pmd)) {
                        pte = pte_offset(pmd, address & PAGE_MASK);
                        if (pte && pte_present(*pte))
                                /* page frame from the pte plus the offset
                                   of the address within its page */
                                return(pte_page(*pte) | (address & ~PAGE_MASK));
                } else {
                        return (0);
                }
        } else {
                return (0);
        }
        return (0);
}
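/*
 * Example (not built): a hypothetical caller of va_to_phys().  A bus
 * master such as the CPM needs the physical address of a buffer that
 * the kernel otherwise refers to by virtual address; the helper name
 * here is illustrative only.
 */
#if 0
static unsigned long example_buffer_phys(void *buf)
{
        return va_to_phys((unsigned long)buf);
}
#endif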
void
print_8xx_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte;

        printk(" pte @ 0x%8lx: ", addr);
        pgd = pgd_offset(mm, addr & PAGE_MASK);
        if (pgd) {
                pmd = pmd_offset(pgd, addr & PAGE_MASK);
                if (pmd && pmd_present(*pmd)) {
                        pte = pte_offset(pmd, addr & PAGE_MASK);
                        if (pte) {
                                printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
                                       (long)pgd, (long)pte, (long)pte_val(*pte));
#define pp ((long)pte_val(*pte))
                                printk(" RPN: %05x PP: %x SPS: %x SH: %x "
                                       "CI: %x v: %x\n",
                                       pp>>12,     /* rpn */
                                       (pp>>10)&3, /* pp */
                                       (pp>>3)&1,  /* small */
                                       (pp>>2)&1,  /* shared */
                                       (pp>>1)&1,  /* cache inhibit */
                                       pp&1        /* valid */
                                       );
#undef pp
                        }
                        else {
                                printk("no pte\n");
                        }
                }
                else {
                        printk("no pmd\n");
                }
        }
        else {
                printk("no pgd\n");
        }
}
int
get_8xx_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte;
        int retval = 0;

        pgd = pgd_offset(mm, addr & PAGE_MASK);
        if (pgd) {
                pmd = pmd_offset(pgd, addr & PAGE_MASK);
                if (pmd && pmd_present(*pmd)) {
                        pte = pte_offset(pmd, addr & PAGE_MASK);
                        if (pte)
                                retval = (int)pte_val(*pte);
                }
        }
        return(retval);
}
#endif /* CONFIG_8xx */
#if 0
/*
 * Misc debugging functions.  Please leave them here. -- Cort
 */
void print_pte(struct _PTE p)
{
        printk(
        "%08x %08x vsid: %06x h: %01x api: %02x rpn: %05x rcwimg: %d%d%d%d%d%d pp: %02x\n",
                *((unsigned long *)(&p)), *((long *)&p+1),
                p.vsid, p.h, p.api, p.rpn,
                p.r, p.c, p.w, p.i, p.m, p.g, p.pp);
}

/*
 * Search the hw hash table for a mapping to the given physical
 * address. -- Cort
 */
unsigned long htab_phys_to_va(unsigned long address)
{
        extern PTE *Hash, *Hash_end;
        PTE *ptr;

        for ( ptr = Hash ; ptr < Hash_end ; ptr++ ) {
                if ( ptr->rpn == (address>>12) )
                        printk("phys %08lX -> va ???\n",
                               address);
        }
}
#endif