/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/fault.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003  Paul Mundt
 */

#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/registers.h>		/* required by inline asm statements */

#if defined(CONFIG_SH64_PROC_TLB)
#include <linux/init.h>
#include <linux/proc_fs.h>
/* Count numbers of tlb refills in each region */
static unsigned long long calls_to_update_mmu_cache = 0ULL;
static unsigned long long calls_to_flush_tlb_page = 0ULL;
static unsigned long long calls_to_flush_tlb_range = 0ULL;
static unsigned long long calls_to_flush_tlb_mm = 0ULL;
static unsigned long long calls_to_flush_tlb_all = 0ULL;
unsigned long long calls_to_do_slow_page_fault = 0ULL;
unsigned long long calls_to_do_fast_page_fault = 0ULL;

/* Count size of ranges for flush_tlb_range */
static unsigned long long flush_tlb_range_1 = 0ULL;
static unsigned long long flush_tlb_range_2 = 0ULL;
static unsigned long long flush_tlb_range_3_4 = 0ULL;
static unsigned long long flush_tlb_range_5_7 = 0ULL;
static unsigned long long flush_tlb_range_8_11 = 0ULL;
static unsigned long long flush_tlb_range_12_15 = 0ULL;
static unsigned long long flush_tlb_range_16_up = 0ULL;

static unsigned long long page_not_present = 0ULL;
#endif

extern void die(const char *, struct pt_regs *, long);

#define PFLAG(val,flag)	(((val) & (flag)) ? #flag : "")
#define PPROT(flag)	PFLAG(pgprot_val(prot), flag)

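/*
 * Example: PPROT(_PAGE_READ) expands to
 * ((pgprot_val(prot) & (_PAGE_READ)) ? "_PAGE_READ" : ""), i.e. the flag
 * name when the bit is set in prot and the empty string otherwise, so
 * print_prots() below emits one token per protection bit that is set.
 */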
static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));
	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", tsk->pid);
}

static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir)) {
		return NULL;
	}

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd)) {
		return NULL;
	}

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry)) {
		return NULL;
	}
	if (!pte_present(entry)) {
		return NULL;
	}

	return pte;
}

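/*
 * Usage sketch (mirrors the slow-path handler below): after
 * handle_mm_fault() has set up a mapping, the fault path re-walks the
 * page tables so it can preload the TLB:
 *
 *	pte = lookup_pte(mm, address);
 *	if (pte)
 *		__do_tlb_refill(address, textaccess, pte);
 *
 * A NULL return means some level of the walk was absent or the pte was
 * not present; the caller then simply lets the access fault again.
 */
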
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_do_slow_page_fault;
#endif

	/* SIM
	 * Note this is now called with interrupts still disabled.
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);

	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);

		print_vma(vma);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __FUNCTION__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte). (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}

	__do_tlb_refill(address, textaccess, pte);

no_pte:
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;
		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, current->pid, current->comm,
			       (unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}
		if (is_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_init(current))
		panic("INIT out of memory\n");
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

void flush_tlb_all(void);

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_update_mmu_cache;
#endif

	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

	if (mm->context == NO_CONTEXT)
		return;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
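	/*
	 * (Assumption from the macro names: the CPU implements "neff"
	 * effective address bits, so an address whose top implemented bit
	 * (NEFF_SIGN) is set must have the unimplemented upper bits filled
	 * with ones (NEFF_MASK) before it can be compared against the full
	 * 64-bit EPN held in the PTEH configuration register.)
	 */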
	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;

	/* Do ITLB : don't bother for pages in non-executable VMAs */
	if (vma->vm_flags & VM_EXEC) {
		for_each_itlb_entry(tlb) {
			asm volatile ("getcfg	%1, 0, %0"
				      : "=r" (pteh)
				      : "r" (tlb) );

			if (pteh == match) {
				__flush_tlb_slot(tlb);
				break;
			}
		}
	}

	/* Do DTLB : any page could potentially be in here. */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_page;
#endif

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		__flush_tlb_page(vma, page);
		local_irq_restore(flags);
	}
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	struct mm_struct *mm;

	mm = vma->vm_mm;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_range;

	{
		unsigned long size = (end - 1) - start;
		size >>= 12;	/* divide by PAGE_SIZE */
		size++;		/* end=start+4096 => 1 page */
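		/*
		 * Worked example: start = 0x2000, end = 0x4000 gives
		 * size = ((0x3fff - 0x2000) >> 12) + 1 = 2 pages, which
		 * is counted in the flush_tlb_range_2 bucket below.
		 */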
		switch (size) {
		case 1:		flush_tlb_range_1++;	break;
		case 2:		flush_tlb_range_2++;	break;
		case 3 ... 4:	flush_tlb_range_3_4++;	break;
		case 5 ... 7:	flush_tlb_range_5_7++;	break;
		case 8 ... 11:	flush_tlb_range_8_11++;	break;
		case 12 ... 15:	flush_tlb_range_12_15++; break;
		default:	flush_tlb_range_16_up++; break;
		}
	}
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_mm;
#endif

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

	mm->context = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm);

	local_irq_restore(flags);
}

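/*
 * Note that flush_tlb_mm() does not walk any TLB slots: resetting
 * mm->context to NO_CONTEXT means a fresh ASID gets assigned the next
 * time the mm is activated (activate_context() above re-triggers that
 * for the current mm), so stale entries tagged with the old ASID can no
 * longer match.  This is an inference from the code here; the ASID
 * allocation itself lives in the mmu_context handling.
 */
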
void flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

#if defined(CONFIG_SH64_PROC_TLB)
	++calls_to_flush_tlb_all;
#endif

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb) {
		__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}

#if defined(CONFIG_SH64_PROC_TLB)
/* Procfs interface to read the performance information */

static int
tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
	int len = 0;
	len += sprintf(buf+len, "do_fast_page_fault   called %12lld times\n", calls_to_do_fast_page_fault);
	len += sprintf(buf+len, "do_slow_page_fault   called %12lld times\n", calls_to_do_slow_page_fault);
	len += sprintf(buf+len, "update_mmu_cache     called %12lld times\n", calls_to_update_mmu_cache);
	len += sprintf(buf+len, "flush_tlb_page       called %12lld times\n", calls_to_flush_tlb_page);
	len += sprintf(buf+len, "flush_tlb_range      called %12lld times\n", calls_to_flush_tlb_range);
	len += sprintf(buf+len, "flush_tlb_mm         called %12lld times\n", calls_to_flush_tlb_mm);
	len += sprintf(buf+len, "flush_tlb_all        called %12lld times\n", calls_to_flush_tlb_all);
	len += sprintf(buf+len, "flush_tlb_range_sizes\n"
			" 1      : %12lld\n"
			" 2      : %12lld\n"
			" 3 -  4 : %12lld\n"
			" 5 -  7 : %12lld\n"
			" 8 - 11 : %12lld\n"
			"12 - 15 : %12lld\n"
			"16+     : %12lld\n",
			flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4,
			flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15,
			flush_tlb_range_16_up);
	len += sprintf(buf+len, "page not present     %12lld times\n", page_not_present);
	*eof = 1;
	return len;
}

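/*
 * The counters are read with an ordinary procfs read, e.g. "cat /proc/tlb"
 * from a shell, which returns one "<function> called <count> times" line
 * per counter plus the flush_tlb_range size histogram formatted above.
 */
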
static int __init register_proc_tlb(void)
{
	create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL);
	return 0;
}

__initcall(register_proc_tlb);
#endif