/*
 * arch/mips64/mm/andes.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r10kcache.h>
#include <asm/system.h>
#include <asm/sgialib.h>
#include <asm/mmu_context.h>

static int scache_lsz64;

/*
 * This version has been tuned on an Origin.  For other machines the
 * arguments of the pref instruction may have to be tuned differently.
 */
static void andes_clear_page(void * page)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"daddiu\t$1,%0,%2\n"
		"1:\tpref 7,512(%0)\n\t"
		"sd\t$0,(%0)\n\t"
		"sd\t$0,8(%0)\n\t"
		"sd\t$0,16(%0)\n\t"
		"sd\t$0,24(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"sd\t$0,-32(%0)\n\t"
		"sd\t$0,-24(%0)\n\t"
		"sd\t$0,-16(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		"sd\t$0,-8(%0)\n\t"
		".set\tat\n\t"
		".set\treorder"
		:"=r" (page)
		:"0" (page), "I" (PAGE_SIZE)
		:"$1", "memory");
}

/* R10000 has no Create_Dirty type cacheops. */
static void andes_copy_page(void * to, void * from)
{
	unsigned long dummy1, dummy2, reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"daddiu\t$1,%0,%8\n"
		"1:\tpref\t0,2*128(%1)\n\t"
		"pref\t1,2*128(%0)\n\t"
		"ld\t%2,(%1)\n\t"
		"ld\t%3,8(%1)\n\t"
		"ld\t%4,16(%1)\n\t"
		"ld\t%5,24(%1)\n\t"
		"sd\t%2,(%0)\n\t"
		"sd\t%3,8(%0)\n\t"
		"sd\t%4,16(%0)\n\t"
		"sd\t%5,24(%0)\n\t"
		"daddiu\t%0,64\n\t"
		"daddiu\t%1,64\n\t"
		"ld\t%2,-32(%1)\n\t"
		"ld\t%3,-24(%1)\n\t"
		"ld\t%4,-16(%1)\n\t"
		"ld\t%5,-8(%1)\n\t"
		"sd\t%2,-32(%0)\n\t"
		"sd\t%3,-24(%0)\n\t"
		"sd\t%4,-16(%0)\n\t"
		"bne\t$1,%0,1b\n\t"
		" sd\t%5,-8(%0)\n\t"
		".set\tat\n\t"
		".set\treorder"
		:"=r" (dummy1), "=r" (dummy2), "=&r" (reg1), "=&r" (reg2),
		 "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from), "I" (PAGE_SIZE));
}

/* Cache operations.  These are only used with the virtual memory system,
   not for non-coherent I/O so it's ok to ignore the secondary caches. */
static void
andes_flush_cache_l1(void)
{
	blast_dcache32(); blast_icache64();
}

/*
 * This is only used during initialization time.  vmalloc() also calls
 * this, but that will be changed pretty soon.
 */
static void
andes_flush_cache_l2(void)
{
	switch (sc_lsize()) {
	case 64:
		blast_scache64();
		break;
	case 128:
		blast_scache128();
		break;
	default:
		printk("Unknown L2 line size\n");
		while (1);	/* no way to recover -- hang deliberately */
	}
}

void
andes_flush_icache_page(unsigned long page)
{
	if (scache_lsz64)
		blast_scache64_page(page);
	else
		blast_scache128_page(page);
}

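/*
 * The signal trampoline is written through the dcache and then executed
 * from the icache, so both sides must be dealt with: write the dcache
 * lines back and invalidate the icache lines.  Two lines of each are
 * covered because the trampoline may straddle a cache line boundary.
 */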
static void
andes_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long daddr, iaddr;

	daddr = addr & ~(dc_lsize - 1);
	protected_writeback_dcache_line(daddr);
	protected_writeback_dcache_line(daddr + dc_lsize);
	iaddr = addr & ~(ic_lsize - 1);
	protected_flush_icache_line(iaddr);
	protected_flush_icache_line(iaddr + ic_lsize);
}

#define NTLB_ENTRIES		64
#define NTLB_ENTRIES_HALF	32

static inline void
andes_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	unsigned long entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	__save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = get_entryhi() & 0xff;
	set_entryhi(CKSEG0);
	set_entrylo0(0);
	set_entrylo1(0);

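	/* Entries below the wired mark hold fixed mappings; skip them. */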
	entry = get_wired();

	/* Blast 'em all away. */
	while (entry < NTLB_ENTRIES) {
		set_index(entry);
		tlb_write_indexed();
		entry++;
	}
	set_entryhi(old_ctx);
	__restore_flags(flags);
}

static void andes_flush_tlb_mm(struct mm_struct *mm)
{
	if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
		unsigned long flags;

#ifdef DEBUG_TLB
		printk("[tlbmm<%d>]", mm->context);
#endif
		__save_and_cli(flags);
		get_new_cpu_mmu_context(mm, smp_processor_id());
		if (mm == current->mm)
			set_entryhi(CPU_CONTEXT(smp_processor_id(), mm) & 0xff);
		__restore_flags(flags);
	}
}

static void
andes_flush_tlb_range(struct mm_struct *mm, unsigned long start,
		      unsigned long end)
{
	if (CPU_CONTEXT(smp_processor_id(), mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
		       start, end);
#endif
		__save_and_cli(flags);
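		/*
		 * Each TLB entry maps an even/odd pair of pages, so halve
		 * the page count to get the number of entries to probe.
		 */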
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (get_entryhi() & 0xff);
			int newpid = (CPU_CONTEXT(smp_processor_id(), mm) & 0xff);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				set_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = get_index();
				set_entrylo0(0);
				set_entrylo1(0);
				set_entryhi(KSEG0);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			set_entryhi(oldpid);
		} else {
			get_new_cpu_mmu_context(mm, smp_processor_id());
			if (mm == current->mm)
				set_entryhi(CPU_CONTEXT(smp_processor_id(), mm) &
					    0xff);
		}
		__restore_flags(flags);
	}
}

static void
andes_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%d,%08lx>]", vma->vm_mm->context, page);
#endif
		newpid = (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) & 0xff);
		page &= (PAGE_MASK << 1);
		__save_and_cli(flags);
		oldpid = (get_entryhi() & 0xff);
		set_entryhi(page | newpid);
		tlb_probe();
		idx = get_index();
		set_entrylo0(0);
		set_entrylo1(0);
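		/*
		 * Point EntryHi at KSEG0, an impossible VPN2, so the slot
		 * we are about to invalidate can never match a lookup.
		 */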
		set_entryhi(KSEG0);
		if (idx < 0)
			goto finish;
		tlb_write_indexed();

	finish:
		set_entryhi(oldpid);
		__restore_flags(flags);
	}
}

/* XXX Simplify this.  On the R10000 writing a TLB entry for a virtual
   address that already exists will overwrite the old entry and not result
   in TLB malfunction or TLB shutdown. */
static void andes_update_mmu_cache(struct vm_area_struct * vma,
				   unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages on behalf of the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	__save_and_cli(flags);
	pid = get_entryhi() & 0xff;

	if ((pid != (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) & 0xff)) ||
	    (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
		       (int) (CPU_CONTEXT(smp_processor_id(), vma->vm_mm) & 0xff),
		       pid);
	}

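	/*
	 * The TLB probe is interleaved with the page table walk so that
	 * useful work hides the probe latency; the even/odd PTE pair then
	 * refills both EntryLo registers below.
	 */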
	address &= (PAGE_MASK << 1);
	set_entryhi(address | (pid));
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pmdp = pmd_offset(pgdp, address);
	idx = get_index();
	ptep = pte_offset(pmdp, address);
	set_entrylo0(pte_val(*ptep++) >> 6);	/* PTE -> EntryLo format */
	set_entrylo1(pte_val(*ptep) >> 6);
	set_entryhi(address | (pid));
	if (idx < 0) {
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	set_entryhi(pid);
	__restore_flags(flags);
}

static void andes_show_regs(struct pt_regs *regs)
{
	printk("Cpu %d\n", smp_processor_id());

	/* Saved main processor registers. */
	printk("$0 : %016lx %016lx %016lx %016lx\n",
	       0UL, regs->regs[1], regs->regs[2], regs->regs[3]);
	printk("$4 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
	printk("$8 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
	printk("$12 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
	printk("$16 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]);
	printk("$20 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
	printk("$24 : %016lx %016lx\n",
	       regs->regs[24], regs->regs[25]);
	printk("$28 : %016lx %016lx %016lx %016lx\n",
	       regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]);
	printk("Hi : %016lx\n", regs->hi);
	printk("Lo : %016lx\n", regs->lo);

	/* Saved cp0 registers. */
	printk("epc : %016lx\nbadvaddr: %016lx\n",
	       regs->cp0_epc, regs->cp0_badvaddr);
	printk("Status : %08x\nCause : %08x\n",
	       (unsigned int) regs->cp0_status, (unsigned int) regs->cp0_cause);
}

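/*
 * Boot-time setup for the R10000: report the cache geometry, then
 * install the routines above into the function pointers that the
 * generic MIPS mm code dispatches through.
 */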
void __init ld_mmu_andes(void)
{
	printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));

	printk("Primary instruction cache %dkb, linesize %d bytes\n",
	       icache_size >> 10, ic_lsize);
	printk("Primary data cache %dkb, linesize %d bytes\n",
	       dcache_size >> 10, dc_lsize);
	printk("Secondary cache sized at %ldK, linesize %ld\n",
	       scache_size() >> 10, sc_lsize());

	_clear_page = andes_clear_page;
	_copy_page = andes_copy_page;

	_flush_cache_l1 = andes_flush_cache_l1;
	_flush_cache_l2 = andes_flush_cache_l2;
	_flush_cache_sigtramp = andes_flush_cache_sigtramp;

	_flush_tlb_all = andes_flush_tlb_all;
	_flush_tlb_mm = andes_flush_tlb_mm;
	_flush_tlb_range = andes_flush_tlb_range;
	_flush_tlb_page = andes_flush_tlb_page;

	switch (sc_lsize()) {
	case 64:
		scache_lsz64 = 1;
		break;
	case 128:
		scache_lsz64 = 0;
		break;
	default:
		printk("Unknown L2 line size\n");
		while (1);	/* no way to recover -- hang deliberately */
	}

	update_mmu_cache = andes_update_mmu_cache;

	_show_regs = andes_show_regs;

	flush_cache_l1();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	write_32bit_cp0_register(CP0_PAGEMASK, PM_4K);

	/* From this point on the ARC firmware is dead. */
	_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */
}