Import 2.3.9pre5
[davej-history.git] / arch / mips / mm / r2300.c
blobfaa80e45744c658755590ab35d21c755728987e7
1 /*
2 * r2300.c: R2000 and R3000 specific mmu/cache code.
4 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
6 * with a lot of changes to make this thing work for R3000s
7 * Copyright (C) 1998 Harald Koerfgen
8 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
10 * $Id: r2300.c,v 1.8 1999/04/11 17:13:56 harald Exp $
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/mm.h>
17 #include <asm/page.h>
18 #include <asm/pgtable.h>
19 #include <asm/mmu_context.h>
20 #include <asm/system.h>
21 #include <asm/sgialib.h>
22 #include <asm/mipsregs.h>
23 #include <asm/io.h>
25 * Temporarily disabled
27 #include <asm/wbflush.h>
31 * According to the paper written by D. Miller about Linux cache & TLB
32 * flush implementation, DMA/Driver coherence should be done at the
33 * driver layer. Thus, normally, we don't need flush dcache for R3000.
34 * Define this if driver does not handle cache consistency during DMA ops.
36 #undef DO_DCACHE_FLUSH
/*
 * Descriptor for one cache space.  The R3000 has split instruction
 * and data caches, but both are probed and flushed the same way, so
 * one structure type serves for either.
 */
static struct cache_space {
	unsigned long ca_flags;	/* CP0 status bits that select/isolate this cache */
	int size;		/* cache size in bytes, probed at boot */
} icache, dcache;
46 #undef DEBUG_TLB
47 #undef DEBUG_CACHE
49 extern unsigned long mips_tlb_entries;
51 #define NTLB_ENTRIES 64 /* Fixed on all R23000 variants... */
53 /* page functions */
54 void r2300_clear_page(unsigned long page)
56 __asm__ __volatile__(
57 ".set\tnoreorder\n\t"
58 ".set\tnoat\n\t"
59 "addiu\t$1,%0,%2\n"
60 "1:\tsw\t$0,(%0)\n\t"
61 "sw\t$0,4(%0)\n\t"
62 "sw\t$0,8(%0)\n\t"
63 "sw\t$0,12(%0)\n\t"
64 "addiu\t%0,32\n\t"
65 "sw\t$0,-16(%0)\n\t"
66 "sw\t$0,-12(%0)\n\t"
67 "sw\t$0,-8(%0)\n\t"
68 "bne\t$1,%0,1b\n\t"
69 "sw\t$0,-4(%0)\n\t"
70 ".set\tat\n\t"
71 ".set\treorder"
72 :"=r" (page)
73 :"0" (page),
74 "I" (PAGE_SIZE)
75 :"$1","memory");
78 static void r2300_copy_page(unsigned long to, unsigned long from)
80 unsigned long dummy1, dummy2;
81 unsigned long reg1, reg2, reg3, reg4;
83 __asm__ __volatile__(
84 ".set\tnoreorder\n\t"
85 ".set\tnoat\n\t"
86 "addiu\t$1,%0,%8\n"
87 "1:\tlw\t%2,(%1)\n\t"
88 "lw\t%3,4(%1)\n\t"
89 "lw\t%4,8(%1)\n\t"
90 "lw\t%5,12(%1)\n\t"
91 "sw\t%2,(%0)\n\t"
92 "sw\t%3,4(%0)\n\t"
93 "sw\t%4,8(%0)\n\t"
94 "sw\t%5,12(%0)\n\t"
95 "lw\t%2,16(%1)\n\t"
96 "lw\t%3,20(%1)\n\t"
97 "lw\t%4,24(%1)\n\t"
98 "lw\t%5,28(%1)\n\t"
99 "sw\t%2,16(%0)\n\t"
100 "sw\t%3,20(%0)\n\t"
101 "sw\t%4,24(%0)\n\t"
102 "sw\t%5,28(%0)\n\t"
103 "addiu\t%0,64\n\t"
104 "addiu\t%1,64\n\t"
105 "lw\t%2,-32(%1)\n\t"
106 "lw\t%3,-28(%1)\n\t"
107 "lw\t%4,-24(%1)\n\t"
108 "lw\t%5,-20(%1)\n\t"
109 "sw\t%2,-32(%0)\n\t"
110 "sw\t%3,-28(%0)\n\t"
111 "sw\t%4,-24(%0)\n\t"
112 "sw\t%5,-20(%0)\n\t"
113 "lw\t%2,-16(%1)\n\t"
114 "lw\t%3,-12(%1)\n\t"
115 "lw\t%4,-8(%1)\n\t"
116 "lw\t%5,-4(%1)\n\t"
117 "sw\t%2,-16(%0)\n\t"
118 "sw\t%3,-12(%0)\n\t"
119 "sw\t%4,-8(%0)\n\t"
120 "bne\t$1,%0,1b\n\t"
121 "sw\t%5,-4(%0)\n\t"
122 ".set\tat\n\t"
123 ".set\treorder"
124 :"=r" (dummy1), "=r" (dummy2),
125 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
126 :"0" (to), "1" (from),
127 "I" (PAGE_SIZE));
130 __initfunc(static unsigned long size_cache(unsigned long ca_flags))
132 unsigned long flags, status, dummy, size;
133 volatile unsigned long *p;
135 p = (volatile unsigned long *) KSEG0;
137 save_and_cli(flags);
139 /* isolate cache space */
140 write_32bit_cp0_register(CP0_STATUS, (ca_flags|flags)&~ST0_IEC);
142 *p = 0xa5a55a5a;
143 dummy = *p;
144 status = read_32bit_cp0_register(CP0_STATUS);
146 if (dummy != 0xa5a55a5a || (status & (1<<19))) {
147 size = 0;
148 } else {
149 for (size = 512; size <= 0x40000; size <<= 1)
150 *(p + size) = 0;
151 *p = -1;
152 for (size = 512;
153 (size <= 0x40000) && (*(p + size) == 0);
154 size <<= 1)
156 if (size > 0x40000)
157 size = 0;
159 restore_flags(flags);
161 return size * sizeof(*p);
164 __initfunc(static void probe_dcache(void))
166 dcache.size = size_cache(dcache.ca_flags = ST0_DE);
167 printk("Data cache %dkb\n", dcache.size >> 10);
170 __initfunc(static void probe_icache(void))
172 icache.size = size_cache(icache.ca_flags = ST0_DE|ST0_CE);
173 printk("Instruction cache %dkb\n", icache.size >> 10);
176 static inline unsigned long get_phys_page (unsigned long page,
177 struct mm_struct *mm)
179 page &= PAGE_MASK;
180 if (page >= KSEG0 && page < KSEG1) {
182 * We already have physical address
184 return page;
185 } else {
186 if (!mm) {
187 printk ("get_phys_page: vaddr without mm\n");
188 return 0;
189 } else {
191 * Find a physical page using mm_struct
193 pgd_t *page_dir;
194 pmd_t *page_middle;
195 pte_t *page_table, pte;
197 unsigned long address = page;
199 page_dir = pgd_offset(mm, address);
200 if (pgd_none(*page_dir))
201 return 0;
202 page_middle = pmd_offset(page_dir, address);
203 if (pmd_none(*page_middle))
204 return 0;
205 page_table = pte_offset(page_middle, address);
206 pte = *page_table;
207 if (!pte_present(pte))
208 return 0;
209 return pte_page(pte);
214 static inline void flush_cache_space_page(struct cache_space *space,
215 unsigned long page)
217 register unsigned long i, flags, size = space->size;
218 register volatile unsigned char *p = (volatile unsigned char*) page;
220 #ifndef DO_DCACHE_FLUSH
221 if (space == &dcache)
222 return;
223 #endif
224 if (size > PAGE_SIZE)
225 size = PAGE_SIZE;
227 save_and_cli(flags);
229 /* isolate cache space */
230 write_32bit_cp0_register(CP0_STATUS, (space->ca_flags|flags)&~ST0_IEC);
232 for (i = 0; i < size; i += 64) {
233 asm ( "sb\t$0,(%0)\n\t"
234 "sb\t$0,4(%0)\n\t"
235 "sb\t$0,8(%0)\n\t"
236 "sb\t$0,12(%0)\n\t"
237 "sb\t$0,16(%0)\n\t"
238 "sb\t$0,20(%0)\n\t"
239 "sb\t$0,24(%0)\n\t"
240 "sb\t$0,28(%0)\n\t"
241 "sb\t$0,32(%0)\n\t"
242 "sb\t$0,36(%0)\n\t"
243 "sb\t$0,40(%0)\n\t"
244 "sb\t$0,44(%0)\n\t"
245 "sb\t$0,48(%0)\n\t"
246 "sb\t$0,52(%0)\n\t"
247 "sb\t$0,56(%0)\n\t"
248 "sb\t$0,60(%0)\n\t"
249 : : "r" (p) );
250 p += 64;
253 restore_flags(flags);
256 static inline void flush_cache_space_all(struct cache_space *space)
258 unsigned long page = KSEG0;
259 int size = space->size;
261 #ifndef DO_DCACHE_FLUSH
262 if (space == &dcache)
263 return;
264 #endif
265 while(size > 0) {
266 flush_cache_space_page(space, page);
267 page += PAGE_SIZE; size -= PAGE_SIZE;
271 static inline void r2300_flush_cache_all(void)
273 flush_cache_space_all(&dcache);
274 flush_cache_space_all(&icache);
277 static void r2300_flush_cache_mm(struct mm_struct *mm)
279 if(mm->context == 0)
280 return;
281 #ifdef DEBUG_CACHE
282 printk("cmm[%d]", (int)mm->context);
283 #endif
285 * This function is called not offen, so it looks
286 * enough good to flush all caches than scan mm_struct,
287 * count pages to flush (and, very probably, flush more
288 * than cache space size :-)
290 flush_cache_all();
293 static void r2300_flush_cache_range(struct mm_struct *mm,
294 unsigned long start,
295 unsigned long end)
298 * In general, we need to flush both i- & d- caches here.
299 * Optimization: if cache space is less than given range,
300 * it is more quickly to flush all cache than all pages in range.
303 unsigned long page;
304 int icache_done = 0, dcache_done = 0;
306 if(mm->context == 0)
307 return;
308 #ifdef DEBUG_CACHE
309 printk("crange[%d]", (int)mm->context);
310 #endif
311 if (end - start >= icache.size) {
312 flush_cache_space_all(&icache);
313 icache_done = 1;
315 if (end - start >= dcache.size) {
316 flush_cache_space_all(&dcache);
317 dcache_done = 1;
319 if (icache_done && dcache_done)
320 return;
322 for (page = start; page < end; page += PAGE_SIZE) {
323 unsigned long phys_page = get_phys_page(page, mm);
325 if (phys_page) {
326 if (!icache_done)
327 flush_cache_space_page(&icache, phys_page);
328 if (!dcache_done)
329 flush_cache_space_page(&dcache, phys_page);
334 static void r2300_flush_cache_page(struct vm_area_struct *vma,
335 unsigned long page)
337 struct mm_struct *mm = vma->vm_mm;
339 if(mm->context == 0)
340 return;
341 #ifdef DEBUG_CACHE
342 printk("cpage[%d,%08lx]", (int)mm->context, page);
343 #endif
345 * User changes page, so we need to check:
346 * is icache page flush needed ?
347 * It looks we don't need to flush dcache,
348 * due it is write-transparent on R3000
350 if (vma->vm_flags & VM_EXEC) {
351 unsigned long phys_page = get_phys_page(page, vma->vm_mm);
352 if (phys_page)
353 flush_cache_space_page(&icache, phys_page);
357 static void r2300_flush_page_to_ram(unsigned long page)
360 * We need to flush both i- & d- caches :-(
362 unsigned long phys_page = get_phys_page(page, NULL);
363 #ifdef DEBUG_CACHE
364 printk("cram[%08lx]", page);
365 #endif
366 if (phys_page) {
367 flush_cache_space_page(&icache, phys_page);
368 flush_cache_space_page(&dcache, phys_page);
372 static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
374 register unsigned long i, flags;
375 register volatile unsigned char *p = (volatile unsigned char*) start;
378 * Temporarily disabled
379 wbflush();
383 * Invalidate dcache
385 if (size < 64)
386 size = 64;
388 if (size > dcache.size)
389 size = dcache.size;
391 save_and_cli(flags);
393 /* isolate cache space */
394 write_32bit_cp0_register(CP0_STATUS, (ST0_DE|flags)&~ST0_IEC);
396 for (i = 0; i < size; i += 64) {
397 asm ( "sb\t$0,(%0)\n\t"
398 "sb\t$0,4(%0)\n\t"
399 "sb\t$0,8(%0)\n\t"
400 "sb\t$0,12(%0)\n\t"
401 "sb\t$0,16(%0)\n\t"
402 "sb\t$0,20(%0)\n\t"
403 "sb\t$0,24(%0)\n\t"
404 "sb\t$0,28(%0)\n\t"
405 "sb\t$0,32(%0)\n\t"
406 "sb\t$0,36(%0)\n\t"
407 "sb\t$0,40(%0)\n\t"
408 "sb\t$0,44(%0)\n\t"
409 "sb\t$0,48(%0)\n\t"
410 "sb\t$0,52(%0)\n\t"
411 "sb\t$0,56(%0)\n\t"
412 "sb\t$0,60(%0)\n\t"
413 : : "r" (p) );
414 p += 64;
417 restore_flags(flags);
420 static void r2300_flush_cache_sigtramp(unsigned long page)
423 * We need only flush i-cache here
425 * This function receives virtual address (from signal.c),
426 * but this moment we have needed mm_struct in 'current'
428 unsigned long phys_page = get_phys_page(page, current->mm);
429 #ifdef DEBUG_CACHE
430 printk("csigtramp[%08lx]", page);
431 #endif
432 if (phys_page)
433 flush_cache_space_page(&icache, phys_page);
436 /* TLB operations. */
437 static inline void r2300_flush_tlb_all(void)
439 unsigned long flags;
440 unsigned long old_ctx;
441 int entry;
443 #ifdef DEBUG_TLB
444 printk("[tlball]");
445 #endif
447 save_and_cli(flags);
448 old_ctx = (get_entryhi() & 0xfc0);
449 write_32bit_cp0_register(CP0_ENTRYLO0, 0);
450 for(entry = 0; entry < NTLB_ENTRIES; entry++) {
451 write_32bit_cp0_register(CP0_INDEX, entry << 8);
452 write_32bit_cp0_register(CP0_ENTRYHI, ((entry | 0x80000) << 12));
453 __asm__ __volatile__("tlbwi");
455 set_entryhi(old_ctx);
456 restore_flags(flags);
459 static void r2300_flush_tlb_mm(struct mm_struct *mm)
461 if(mm->context != 0) {
462 unsigned long flags;
464 #ifdef DEBUG_TLB
465 printk("[tlbmm<%d>]", mm->context);
466 #endif
467 save_and_cli(flags);
468 get_new_mmu_context(mm, asid_cache);
469 if(mm == current->mm)
470 set_entryhi(mm->context & 0xfc0);
471 restore_flags(flags);
475 static void r2300_flush_tlb_range(struct mm_struct *mm, unsigned long start,
476 unsigned long end)
478 if(mm->context != 0) {
479 unsigned long flags;
480 int size;
482 #ifdef DEBUG_TLB
483 printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xfc0),
484 start, end);
485 #endif
486 save_and_cli(flags);
487 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
488 if(size <= NTLB_ENTRIES) {
489 int oldpid = (get_entryhi() & 0xfc0);
490 int newpid = (mm->context & 0xfc0);
492 start &= PAGE_MASK;
493 end += (PAGE_SIZE - 1);
494 end &= PAGE_MASK;
495 while(start < end) {
496 int idx;
498 set_entryhi(start | newpid);
499 start += PAGE_SIZE;
500 tlb_probe();
501 idx = get_index();
502 set_entrylo0(0);
503 set_entryhi(KSEG0);
504 if(idx < 0)
505 continue;
506 tlb_write_indexed();
508 set_entryhi(oldpid);
509 } else {
510 get_new_mmu_context(mm, asid_cache);
511 if(mm == current->mm)
512 set_entryhi(mm->context & 0xfc0);
514 restore_flags(flags);
518 static void r2300_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
520 if(vma->vm_mm->context != 0) {
521 unsigned long flags;
522 int oldpid, newpid, idx;
524 #ifdef DEBUG_TLB
525 printk("[tlbpage<%d,%08lx>]", vma->vm_mm->context, page);
526 #endif
527 newpid = (vma->vm_mm->context & 0xfc0);
528 page &= PAGE_MASK;
529 save_and_cli(flags);
530 oldpid = (get_entryhi() & 0xfc0);
531 set_entryhi(page | newpid);
532 tlb_probe();
533 idx = get_index();
534 set_entrylo0(0);
535 set_entryhi(KSEG0);
536 if(idx < 0)
537 goto finish;
538 tlb_write_indexed();
540 finish:
541 set_entryhi(oldpid);
542 restore_flags(flags);
/* Load a new root pointer into the TLB. */
static void r2300_load_pgd(unsigned long pg_dir)
{
	/*
	 * NOTE(review): empty on the R3000 -- the pgd is reached through
	 * the software refill handler, not a hardware root pointer.
	 * Confirm against the original tree; the body lines were lost in
	 * extraction.
	 */
}
552 * Initialize new page directory with pointers to invalid ptes
554 static void r2300_pgd_init(unsigned long page)
556 unsigned long dummy1, dummy2;
559 * The plain and boring version for the R3000. No cache flushing
560 * stuff is implemented since the R3000 has physical caches.
562 __asm__ __volatile__(
563 ".set\tnoreorder\n"
564 "1:\tsw\t%2,(%0)\n\t"
565 "sw\t%2,4(%0)\n\t"
566 "sw\t%2,8(%0)\n\t"
567 "sw\t%2,12(%0)\n\t"
568 "sw\t%2,16(%0)\n\t"
569 "sw\t%2,20(%0)\n\t"
570 "sw\t%2,24(%0)\n\t"
571 "sw\t%2,28(%0)\n\t"
572 "subu\t%1,1\n\t"
573 "bnez\t%1,1b\n\t"
574 "addiu\t%0,32\n\t"
575 ".set\treorder"
576 :"=r" (dummy1),
577 "=r" (dummy2)
578 :"r" ((unsigned long) invalid_pte_table),
579 "0" (page),
580 "1" (PAGE_SIZE/(sizeof(pmd_t)*8)));
583 static void r2300_update_mmu_cache(struct vm_area_struct * vma,
584 unsigned long address, pte_t pte)
586 unsigned long flags;
587 pgd_t *pgdp;
588 pmd_t *pmdp;
589 pte_t *ptep;
590 int idx, pid;
592 pid = (get_entryhi() & 0xfc0);
594 #ifdef DEBUG_TLB
595 if((pid != (vma->vm_mm->context & 0xfc0)) || (vma->vm_mm->context == 0)) {
596 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
597 (int) (vma->vm_mm->context & 0xfc0), pid);
599 #endif
601 save_and_cli(flags);
602 address &= PAGE_MASK;
603 set_entryhi(address | (pid));
604 pgdp = pgd_offset(vma->vm_mm, address);
605 tlb_probe();
606 pmdp = pmd_offset(pgdp, address);
607 idx = get_index();
608 ptep = pte_offset(pmdp, address);
609 set_entrylo0(pte_val(*ptep));
610 set_entryhi(address | (pid));
611 if(idx < 0) {
612 tlb_write_random();
613 #if 0
614 printk("[MISS]");
615 #endif
616 } else {
617 tlb_write_indexed();
618 #if 0
619 printk("[HIT]");
620 #endif
622 #if 0
623 if(!strcmp(current->comm, "args")) {
624 printk("<");
625 for(idx = 0; idx < NTLB_ENTRIES; idx++) {
626 set_index(idx);
627 tlb_read();
628 address = get_entryhi();
629 if((address & 0xfc0) != 0)
630 printk("[%08lx]", address);
632 printk(">\n");
634 #endif
635 set_entryhi(pid);
636 restore_flags(flags);
639 static void r2300_show_regs(struct pt_regs * regs)
642 * Saved main processor registers
644 printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
645 0, (unsigned long) regs->regs[1], (unsigned long) regs->regs[2],
646 (unsigned long) regs->regs[3], (unsigned long) regs->regs[4],
647 (unsigned long) regs->regs[5], (unsigned long) regs->regs[6],
648 (unsigned long) regs->regs[7]);
649 printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
650 (unsigned long) regs->regs[8], (unsigned long) regs->regs[9],
651 (unsigned long) regs->regs[10], (unsigned long) regs->regs[11],
652 (unsigned long) regs->regs[12], (unsigned long) regs->regs[13],
653 (unsigned long) regs->regs[14], (unsigned long) regs->regs[15]);
654 printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
655 (unsigned long) regs->regs[16], (unsigned long) regs->regs[17],
656 (unsigned long) regs->regs[18], (unsigned long) regs->regs[19],
657 (unsigned long) regs->regs[20], (unsigned long) regs->regs[21],
658 (unsigned long) regs->regs[22], (unsigned long) regs->regs[23]);
659 printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n",
660 (unsigned long) regs->regs[24], (unsigned long) regs->regs[25],
661 (unsigned long) regs->regs[28], (unsigned long) regs->regs[29],
662 (unsigned long) regs->regs[30], (unsigned long) regs->regs[31]);
665 * Saved cp0 registers
667 printk("epc : %08lx\nStatus: %08x\nCause : %08x\n",
668 (unsigned long) regs->cp0_epc, (unsigned int) regs->cp0_status,
669 (unsigned int) regs->cp0_cause);
/* Stub: wired TLB entries are not implemented for the R3000 yet. */
static void r2300_add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                                  unsigned long entryhi, unsigned long pagemask)
{
	printk("r2300_add_wired_entry");
	/*
	 * FIXME, to be done
	 */
}
681 static int r2300_user_mode(struct pt_regs *regs)
683 return !(regs->cp0_status & ST0_KUP);
686 __initfunc(void ld_mmu_r2300(void))
688 printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));
690 clear_page = r2300_clear_page;
691 copy_page = r2300_copy_page;
693 probe_icache();
694 probe_dcache();
696 flush_cache_all = r2300_flush_cache_all;
697 flush_cache_mm = r2300_flush_cache_mm;
698 flush_cache_range = r2300_flush_cache_range;
699 flush_cache_page = r2300_flush_cache_page;
700 flush_cache_sigtramp = r2300_flush_cache_sigtramp;
701 flush_page_to_ram = r2300_flush_page_to_ram;
703 flush_tlb_all = r2300_flush_tlb_all;
704 flush_tlb_mm = r2300_flush_tlb_mm;
705 flush_tlb_range = r2300_flush_tlb_range;
706 flush_tlb_page = r2300_flush_tlb_page;
708 dma_cache_wback_inv = r3k_dma_cache_wback_inv;
710 load_pgd = r2300_load_pgd;
711 pgd_init = r2300_pgd_init;
712 update_mmu_cache = r2300_update_mmu_cache;
713 r3000_asid_setup();
715 show_regs = r2300_show_regs;
717 add_wired_entry = r2300_add_wired_entry;
719 user_mode = r2300_user_mode;
721 flush_tlb_all();