/* $Id: sun4c.c,v 1.176 1999/08/31 06:54:42 davem Exp $
 * sun4c.c: Doing in software what should be done in hardware.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/memreg.h>
#include <asm/processor.h>
#include <asm/auxio.h>
#include <asm/io.h>
#include <asm/oplib.h>
#include <asm/openprom.h>
#include <asm/mmu_context.h>
#include <asm/sun4paddr.h>
/* TODO: Make it such that interrupt handlers cannot dick with
 * the user segment lists, most of the cli/sti pairs can
 * disappear once that is taken care of.
 */

/* XXX Ok the real performance win, I figure, will be to use a combined hashing
 * XXX and bitmap scheme to keep track of what we have mapped where.  The whole
 * XXX incentive is to make it such that the range flushes can be serviced
 * XXX always in near constant time. --DaveM
 */

extern int num_segmaps, num_contexts;
/* Define this to get extremely anal debugging, undefine for performance. */
/* #define DEBUG_SUN4C_MM */

#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
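
/* Flush user register windows: the save loop walks down one window at a
 * time (counting in g5) while the task's uwinmask says user windows
 * remain, spilling each to the stack; the restore loop then unwinds back.
 * The mm code below does this before changing user mappings so no live
 * window contents are lost.
 */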
/* This is used in many routines below. */
#define FUW_INLINE do { \
	register int ctr asm("g5"); \
	ctr = 0; \
	__asm__ __volatile__("\n" \
	"1:	ld	[%%g6 + %2], %%g4	! flush user windows\n" \
	"	orcc	%%g0, %%g4, %%g0\n" \
	"	add	%0, 1, %0\n" \
	"	bne	1b\n" \
	"	 save	%%sp, -64, %%sp\n" \
	"2:	subcc	%0, 1, %0\n" \
	"	bne	2b\n" \
	"	 restore %%g0, %%g0, %%g0\n" \
	: "=&r" (ctr) \
	: "0" (ctr), "i" (UWINMASK_OFFSET) \
	: "g4", "cc"); \
} while(0);
#ifdef CONFIG_SUN4
#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
#else
/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
 * So let's save some cycles and just use that everywhere except for that bootup
 * sanity check.
 */
#define SUN4C_VAC_SIZE 65536
#endif

#define SUN4C_KERNEL_BUCKETS 32

#ifndef MAX
#define MAX(a,b) ((a)<(b)?(b):(a))
#endif
#ifndef MIN
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
#define KGPROF_PROFILING 0
#if KGPROF_PROFILING
#define KGPROF_DEPTH 3 /* this needs to match the code below */
#define KGPROF_SIZE 100
static struct {
	unsigned addr[KGPROF_DEPTH];
	unsigned count;
} kgprof_counters[KGPROF_SIZE];

/* just call this function from whatever function you think needs it then
   look at /proc/cpuinfo to see where the function is being called from
   and how often. This gives a type of "kernel gprof" */
#define NEXT_PROF(prev,lvl) (prev>PAGE_OFFSET?__builtin_return_address(lvl):0)
static inline void kgprof_profile(void)
{
	unsigned ret[KGPROF_DEPTH];
	int i,j;
	/* you can't use a variable argument to __builtin_return_address() */
	ret[0] = (unsigned)__builtin_return_address(0);
	ret[1] = (unsigned)NEXT_PROF(ret[0],1);
	ret[2] = (unsigned)NEXT_PROF(ret[1],2);

	for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
		for (j=0;j<KGPROF_DEPTH;j++)
			if (ret[j] != kgprof_counters[i].addr[j]) break;
		if (j==KGPROF_DEPTH) break;
	}
	if (i<KGPROF_SIZE) {
		for (j=0;j<KGPROF_DEPTH;j++)
			kgprof_counters[i].addr[j] = ret[j];
		kgprof_counters[i].count++;
	}
}
#endif
/* Flushing the cache. */
struct sun4c_vac_props sun4c_vacinfo;
static int ctxflushes, segflushes, pageflushes;
unsigned long sun4c_kernel_faults;

/* convert a virtual address to a physical address and vice
   versa. Easy on the 4c */
static unsigned long sun4c_v2p(unsigned long vaddr)
{
	return (vaddr - PAGE_OFFSET);
}

static unsigned long sun4c_p2v(unsigned long vaddr)
{
	return (vaddr + PAGE_OFFSET);
}
/* Invalidate every sun4c cache line tag. */
void sun4c_flush_all(void)
{
	unsigned long begin, end;

	if(sun4c_vacinfo.on)
		panic("SUN4C: AIEEE, trying to invalidate vac while"
		      " it is on.");

	/* Clear 'valid' bit in all cache line tags */
	begin = AC_CACHETAGS;
	end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
	while(begin < end) {
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				     "r" (begin), "i" (ASI_CONTROL));
		begin += sun4c_vacinfo.linesize;
	}
}
/* Context level flush. */
static inline void sun4c_flush_context_hw(void)
{
	unsigned long end = SUN4C_VAC_SIZE;
	unsigned pgsz = PAGE_SIZE;

	ctxflushes++;
	__asm__ __volatile__("
1:	subcc	%0, %2, %0
	bg	1b
	 sta	%%g0, [%0] %3
	nop; nop; nop;		! Weitek hwbug
"	: "=&r" (end)
	: "0" (end), "r" (pgsz), "i" (ASI_HWFLUSHCONTEXT)
	: "cc");
}
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_context_sw(void)
{
	unsigned long nbytes = SUN4C_VAC_SIZE;
	unsigned long lsize = sun4c_vacinfo.linesize;

	ctxflushes++;
	__asm__ __volatile__("
	add	%2, %2, %%g1
	add	%2, %%g1, %%g2
	add	%2, %%g2, %%g3
	add	%2, %%g3, %%g4
	add	%2, %%g4, %%g5
	add	%2, %%g5, %%o4
	add	%2, %%o4, %%o5
1:	subcc	%0, %%o5, %0
	sta	%%g0, [%0] %3
	sta	%%g0, [%0 + %2] %3
	sta	%%g0, [%0 + %%g1] %3
	sta	%%g0, [%0 + %%g2] %3
	sta	%%g0, [%0 + %%g3] %3
	sta	%%g0, [%0 + %%g4] %3
	sta	%%g0, [%0 + %%g5] %3
	bg	1b
	 sta	%%g0, [%1 + %%o4] %3
"	: "=&r" (nbytes)
	: "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
	: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
}
/* Scrape the segment starting at ADDR from the virtual cache. */
static inline void sun4c_flush_segment(unsigned long addr)
{
	if(sun4c_get_segmap(addr) == invalid_segment)
		return;

	segflushes++;
	if(sun4c_vacinfo.do_hwflushes) {
		unsigned long end = (addr + SUN4C_VAC_SIZE);

		for( ; addr < end; addr += PAGE_SIZE)
			__asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
					     "r" (addr), "i" (ASI_HWFLUSHSEG));
	} else {
		unsigned long nbytes = SUN4C_VAC_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		__asm__ __volatile__("add	%2, %2, %%g1\n\t"
				     "add	%2, %%g1, %%g2\n\t"
				     "add	%2, %%g2, %%g3\n\t"
				     "add	%2, %%g3, %%g4\n\t"
				     "add	%2, %%g4, %%g5\n\t"
				     "add	%2, %%g5, %%o4\n\t"
				     "add	%2, %%o4, %%o5\n"
				     "1:\n\t"
				     "subcc	%1, %%o5, %1\n\t"
				     "sta	%%g0, [%0] %6\n\t"
				     "sta	%%g0, [%0 + %2] %6\n\t"
				     "sta	%%g0, [%0 + %%g1] %6\n\t"
				     "sta	%%g0, [%0 + %%g2] %6\n\t"
				     "sta	%%g0, [%0 + %%g3] %6\n\t"
				     "sta	%%g0, [%0 + %%g4] %6\n\t"
				     "sta	%%g0, [%0 + %%g5] %6\n\t"
				     "sta	%%g0, [%0 + %%o4] %6\n\t"
				     "bg	1b\n\t"
				     " add	%0, %%o5, %0\n\t"
				     : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
				     : "0" (addr), "1" (nbytes), "2" (lsize),
				       "i" (ASI_FLUSHSEG)
				     : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
/* Call this version when you know hardware flushes are available. */
static inline void sun4c_flush_segment_hw(unsigned long addr)
{
	if(sun4c_get_segmap(addr) != invalid_segment) {
		unsigned long end;

		segflushes++;
		for(end = addr + SUN4C_VAC_SIZE; addr < end; addr += PAGE_SIZE)
			__asm__ __volatile__("sta %%g0, [%0] %1"
					     : : "r" (addr), "i" (ASI_HWFLUSHSEG));
		/* Weitek POWER-UP hwbug workaround. */
		__asm__ __volatile__("nop;nop;nop;	! Weitek hwbug");
	}
}
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_segment_sw(unsigned long addr)
{
	if(sun4c_get_segmap(addr) != invalid_segment) {
		unsigned long nbytes = SUN4C_VAC_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		segflushes++;
		__asm__ __volatile__("
		add	%2, %2, %%g1
		add	%2, %%g1, %%g2
		add	%2, %%g2, %%g3
		add	%2, %%g3, %%g4
		add	%2, %%g4, %%g5
		add	%2, %%g5, %%o4
		add	%2, %%o4, %%o5
	1:	subcc	%1, %%o5, %1
		sta	%%g0, [%0] %6
		sta	%%g0, [%0 + %2] %6
		sta	%%g0, [%0 + %%g1] %6
		sta	%%g0, [%0 + %%g2] %6
		sta	%%g0, [%0 + %%g3] %6
		sta	%%g0, [%0 + %%g4] %6
		sta	%%g0, [%0 + %%g5] %6
		sta	%%g0, [%0 + %%o4] %6
		bg	1b
		 add	%0, %%o5, %0
"		: "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
		: "0" (addr), "1" (nbytes), "2" (lsize),
		  "i" (ASI_FLUSHSEG)
		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
/* Bolix one page from the virtual cache. */
static void sun4c_flush_page(unsigned long addr)
{
	addr &= PAGE_MASK;

	if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
	   _SUN4C_PAGE_VALID)
		return;

	pageflushes++;
	if(sun4c_vacinfo.do_hwflushes) {
		__asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
				     "r" (addr), "i" (ASI_HWFLUSHPAGE));
	} else {
		unsigned long left = PAGE_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		__asm__ __volatile__("add	%2, %2, %%g1\n\t"
				     "add	%2, %%g1, %%g2\n\t"
				     "add	%2, %%g2, %%g3\n\t"
				     "add	%2, %%g3, %%g4\n\t"
				     "add	%2, %%g4, %%g5\n\t"
				     "add	%2, %%g5, %%o4\n\t"
				     "add	%2, %%o4, %%o5\n"
				     "1:\n\t"
				     "subcc	%1, %%o5, %1\n\t"
				     "sta	%%g0, [%0] %6\n\t"
				     "sta	%%g0, [%0 + %2] %6\n\t"
				     "sta	%%g0, [%0 + %%g1] %6\n\t"
				     "sta	%%g0, [%0 + %%g2] %6\n\t"
				     "sta	%%g0, [%0 + %%g3] %6\n\t"
				     "sta	%%g0, [%0 + %%g4] %6\n\t"
				     "sta	%%g0, [%0 + %%g5] %6\n\t"
				     "sta	%%g0, [%0 + %%o4] %6\n\t"
				     "bg	1b\n\t"
				     " add	%0, %%o5, %0\n\t"
				     : "=&r" (addr), "=&r" (left), "=&r" (lsize)
				     : "0" (addr), "1" (left), "2" (lsize),
				       "i" (ASI_FLUSHPG)
				     : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
/* Again, hw-only and sw-only cache page-level flush variants. */
static inline void sun4c_flush_page_hw(unsigned long addr)
{
	addr &= PAGE_MASK;
	if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
	   _SUN4C_PAGE_VALID) {
		pageflushes++;
		__asm__ __volatile__("sta %%g0, [%0] %1"
				     : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
		/* Weitek POWER-UP hwbug workaround. */
		__asm__ __volatile__("nop;nop;nop;	! Weitek hwbug");
	}
}
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_page_sw(unsigned long addr)
{
	addr &= PAGE_MASK;
	if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
	   _SUN4C_PAGE_VALID) {
		unsigned long left = PAGE_SIZE;
		unsigned long lsize = sun4c_vacinfo.linesize;

		pageflushes++;
		__asm__ __volatile__("
		add	%2, %2, %%g1
		add	%2, %%g1, %%g2
		add	%2, %%g2, %%g3
		add	%2, %%g3, %%g4
		add	%2, %%g4, %%g5
		add	%2, %%g5, %%o4
		add	%2, %%o4, %%o5
	1:	subcc	%1, %%o5, %1
		sta	%%g0, [%0] %6
		sta	%%g0, [%0 + %2] %6
		sta	%%g0, [%0 + %%g1] %6
		sta	%%g0, [%0 + %%g2] %6
		sta	%%g0, [%0 + %%g3] %6
		sta	%%g0, [%0 + %%g4] %6
		sta	%%g0, [%0 + %%g5] %6
		sta	%%g0, [%0 + %%o4] %6
		bg	1b
		 add	%0, %%o5, %0
"		: "=&r" (addr), "=&r" (left), "=&r" (lsize)
		: "0" (addr), "1" (left), "2" (lsize),
		  "i" (ASI_FLUSHPG)
		: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
	}
}
/* The sun4c's do have an on chip store buffer.  And the way you
 * clear them out isn't so obvious.  The only way I can think of
 * to accomplish this is to read the current context register,
 * store the same value there, then read an external hardware
 * register.
 */
void sun4c_complete_all_stores(void)
{
	volatile int _unused;

	_unused = sun4c_get_context();
	sun4c_set_context(_unused);
#ifdef CONFIG_SUN_AUXIO
	_unused = *AUXREG;
#endif
}
/* Bootup utility functions. */
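/* Point segment zero of the current context at pseg, clear every pte
 * under it, then unmap it again: this leaves the hardware segment with
 * all-invalid ptes, ready to be handed out later.
 */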
static inline void sun4c_init_clean_segmap(unsigned char pseg)
{
	unsigned long vaddr;

	sun4c_put_segmap(0, pseg);
	for(vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
		sun4c_put_pte(vaddr, 0);
	sun4c_put_segmap(0, invalid_segment);
}

static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char savectx, ctx;

	savectx = sun4c_get_context();
	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
	for(ctx = 0; ctx < num_contexts; ctx++) {
		sun4c_set_context(ctx);
		for(vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		for(vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		for(vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		for(vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
	}
	sun4c_set_context(savectx);
}
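
/* Probe the virtual address cache geometry.  On sun4 the idprom machine
 * type decides; on sun4c we ask the PROM (falling back to the SS1's
 * known values), sanity check the size, then re-enable the VAC with all
 * tags invalidated.
 */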
void __init sun4c_probe_vac(void)
{
	sun4c_disable_vac();

	if (ARCH_SUN4) {
		switch(idprom->id_machtype) {

		case (SM_SUN4|SM_4_110):
			sun4c_vacinfo.type = NONE;
			sun4c_vacinfo.num_bytes = 0;
			sun4c_vacinfo.linesize = 0;
			sun4c_vacinfo.do_hwflushes = 0;
			prom_printf("No VAC. Get some bucks and buy a real computer.");
			prom_halt();
			break;

		case (SM_SUN4|SM_4_260):
			sun4c_vacinfo.type = WRITE_BACK;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 16;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		case (SM_SUN4|SM_4_330):
			sun4c_vacinfo.type = WRITE_THROUGH;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 16;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		case (SM_SUN4|SM_4_470):
			sun4c_vacinfo.type = WRITE_BACK;
			sun4c_vacinfo.num_bytes = 128 * 1024;
			sun4c_vacinfo.linesize = 32;
			sun4c_vacinfo.do_hwflushes = 0;
			break;

		default:
			prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype);
			prom_halt();
		};
	} else {
		sun4c_vacinfo.type = WRITE_THROUGH;

		if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
		   (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
			/* PROM on SS1 lacks this info, to be super safe we
			 * hard code it here since this arch is cast in stone.
			 */
			sun4c_vacinfo.num_bytes = 65536;
			sun4c_vacinfo.linesize = 16;
		} else {
			sun4c_vacinfo.num_bytes =
			 prom_getintdefault(prom_root_node, "vac-size", 65536);
			sun4c_vacinfo.linesize =
			 prom_getintdefault(prom_root_node, "vac-linesize", 16);
		}
		sun4c_vacinfo.do_hwflushes =
		 prom_getintdefault(prom_root_node, "vac-hwflush", 0);

		if(sun4c_vacinfo.do_hwflushes == 0)
			sun4c_vacinfo.do_hwflushes =
			 prom_getintdefault(prom_root_node, "vac_hwflush", 0);

		if (sun4c_vacinfo.num_bytes != 65536) {
			prom_printf("WEIRD Sun4C VAC cache size, tell davem");
			prom_halt();
		}
	}

	sun4c_vacinfo.num_lines =
		(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
	switch(sun4c_vacinfo.linesize) {
	case 16:
		sun4c_vacinfo.log2lsize = 4;
		break;
	case 32:
		sun4c_vacinfo.log2lsize = 5;
		break;
	default:
		prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
			    sun4c_vacinfo.linesize);
		prom_halt();
	};

	sun4c_flush_all();
	sun4c_enable_vac();
}
/* Patch instructions for the low level kernel fault handler. */
extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
extern unsigned long num_context_patch1, num_context_patch1_16;
extern unsigned long num_context_patch2, num_context_patch2_16;
extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
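
/* Each patch symbol above labels an instruction in the entry.S fault
 * path; PATCH_INSN copies the machine-specific variant over the default
 * instruction so the handler matches this machine's segmap count,
 * context count, and cache line size.
 */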
#define PATCH_INSN(src, dst) do { \
	daddr = &(dst); \
	iaddr = &(src); \
	*daddr = *iaddr; \
} while (0);
static void patch_kernel_fault_handler(void)
{
	unsigned long *iaddr, *daddr;

	switch (num_segmaps) {
		case 128:
			/* Default, nothing to do. */
			break;
		case 256:
			PATCH_INSN(invalid_segment_patch1_ff,
				   invalid_segment_patch1);
			PATCH_INSN(invalid_segment_patch2_ff,
				   invalid_segment_patch2);
			break;
		case 512:
			PATCH_INSN(invalid_segment_patch1_1ff,
				   invalid_segment_patch1);
			PATCH_INSN(invalid_segment_patch2_1ff,
				   invalid_segment_patch2);
			break;
		default:
			prom_printf("Unhandled number of segmaps: %d\n",
				    num_segmaps);
			prom_halt();
	};
	switch (num_contexts) {
		case 8:
			/* Default, nothing to do. */
			break;
		case 16:
			PATCH_INSN(num_context_patch1_16,
				   num_context_patch1);
			PATCH_INSN(num_context_patch2_16,
				   num_context_patch2);
			break;
		default:
			prom_printf("Unhandled number of contexts: %d\n",
				    num_contexts);
			prom_halt();
	};

	if(sun4c_vacinfo.do_hwflushes != 0) {
		PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
		PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
	} else {
		switch(sun4c_vacinfo.linesize) {
		case 16:
			/* Default, nothing to do. */
			break;
		case 32:
			PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
			break;
		default:
			prom_printf("Impossible VAC linesize %d, halting...\n",
				    sun4c_vacinfo.linesize);
			prom_halt();
		};
	}
}
static void __init sun4c_probe_mmu(void)
{
	if (ARCH_SUN4) {
		switch(idprom->id_machtype) {
		case (SM_SUN4|SM_4_110):
			prom_printf("No support for 4100 yet\n");
			prom_halt();
			num_segmaps = 256;
			num_contexts = 8;
			break;

		case (SM_SUN4|SM_4_260):
			/* should be 512 segmaps, when it gets fixed */
			num_segmaps = 256;
			num_contexts = 16;
			break;

		case (SM_SUN4|SM_4_330):
			num_segmaps = 256;
			num_contexts = 16;
			break;

		case (SM_SUN4|SM_4_470):
			/* should be 1024 segmaps, when it gets fixed */
			num_segmaps = 256;
			num_contexts = 64;
			break;
		default:
			prom_printf("Invalid SUN4 model\n");
			prom_halt();
		};
	} else {
		if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
		   (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
			/* Hardcode these just to be safe, PROM on SS1 does
			 * not have this info available in the root node.
			 */
			num_segmaps = 128;
			num_contexts = 8;
		} else {
			num_segmaps =
			    prom_getintdefault(prom_root_node, "mmu-npmg", 128);
			num_contexts =
			    prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
		}
	}
	patch_kernel_fault_handler();
}
volatile unsigned long *sun4c_memerr_reg = 0;

void __init sun4c_probe_memerr_reg(void)
{
	int node;
	struct linux_prom_registers regs[1];

	if (ARCH_SUN4) {
		sun4c_memerr_reg = sparc_alloc_io(sun4_memreg_physaddr, 0,
						  PAGE_SIZE,
						  "memory parity error",
						  0x0, 0);
	} else {
		node = prom_getchild(prom_root_node);
		node = prom_searchsiblings(node, "memory-error");
		if (!node)
			return;
		prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
		sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
						  regs[0].reg_size,
						  "memory parity error",
						  regs[0].which_io, 0);
	}
}
static inline void sun4c_init_ss2_cache_bug(void)
{
	extern unsigned long start;

	if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
	   (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
	   (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
	   (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
		/* Whee.. */
		printk("SS2 cache bug detected, uncaching trap table page\n");
		sun4c_flush_page((unsigned int) &start);
		sun4c_put_pte(((unsigned long) &start),
			      (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
	}
}
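
/* Back [addr, addr+len) with freshly allocated pages entered directly
 * as valid, dirty, privileged and uncached ptes, so DVMA devices and
 * the cpu agree on the data without any cache flushing.
 */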
/* Addr is always aligned on a page boundary for us already. */
static void sun4c_map_dma_area(unsigned long addr, int len)
{
	unsigned long page, end;

	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = get_free_page(GFP_KERNEL);
		if(!page) {
			prom_printf("alloc_dvma: Cannot get a dvma page\n");
			prom_halt();
		}
		sun4c_flush_page(page);
		page -= PAGE_OFFSET;
		page >>= PAGE_SHIFT;
		page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
			 _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
		sun4c_put_pte(addr, page);
		addr += PAGE_SIZE;
	}
}
/* TLB management. */

/* Don't change this struct without changing entry.S. This is used
 * in the in-window kernel fault handler, and you don't want to mess
 * with that. (See sun4c_fault in entry.S).
 */
struct sun4c_mmu_entry {
	struct sun4c_mmu_entry *next;
	struct sun4c_mmu_entry *prev;
	unsigned long vaddr;
	unsigned char pseg;
	unsigned char locked;
};

static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
static void __init sun4c_init_mmu_entry_pool(void)
{
	int i;

	for(i=0; i < SUN4C_MAX_SEGMAPS; i++) {
		mmu_entry_pool[i].pseg = i;
		mmu_entry_pool[i].next = 0;
		mmu_entry_pool[i].prev = 0;
		mmu_entry_pool[i].vaddr = 0;
		mmu_entry_pool[i].locked = 0;
	}
	mmu_entry_pool[invalid_segment].locked = 1;
}
static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
				   unsigned long bits_off)
{
	unsigned long start, end;

	end = vaddr + SUN4C_REAL_PGDIR_SIZE;
	for(start = vaddr; start < end; start += PAGE_SIZE)
		if(sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
			sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
				      ~bits_off);
}
static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char pseg, ctx;
#ifdef CONFIG_SUN4
	/* sun4/110 and 260 have no kadb. */
	if((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
	   (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
#endif
	for(vaddr = KADB_DEBUGGER_BEGVM;
	    vaddr < LINUX_OPPROM_ENDVM;
	    vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		if(pseg != invalid_segment) {
			mmu_entry_pool[pseg].locked = 1;
			for(ctx = 0; ctx < num_contexts; ctx++)
				prom_putsegment(ctx, vaddr, pseg);
			fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
		}
	}
#ifdef CONFIG_SUN4
	}
#endif
	for(vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		mmu_entry_pool[pseg].locked = 1;
		for(ctx = 0; ctx < num_contexts; ctx++)
			prom_putsegment(ctx, vaddr, pseg);
		fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
	}
}
static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
{
	int i, ctx;

	while(start < end) {
		for(i=0; i < invalid_segment; i++)
			if(!mmu_entry_pool[i].locked)
				break;
		mmu_entry_pool[i].locked = 1;
		sun4c_init_clean_segmap(i);
		for(ctx = 0; ctx < num_contexts; ctx++)
			prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
		start += SUN4C_REAL_PGDIR_SIZE;
	}
}
/* Don't change this struct without changing entry.S. This is used
 * in the in-window kernel fault handler, and you don't want to mess
 * with that. (See sun4c_fault in entry.S).
 */
struct sun4c_mmu_ring {
	struct sun4c_mmu_entry ringhd;
	int num_entries;
};

static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
static struct sun4c_mmu_ring sun4c_ufree_ring;  /* free user entries */
struct sun4c_mmu_ring sun4c_kernel_ring;        /* used kernel entries */
struct sun4c_mmu_ring sun4c_kfree_ring;         /* free kernel entries */
static inline void sun4c_init_rings(unsigned long *mempool)
{
	int i;
	for(i=0; i<SUN4C_MAX_CONTEXTS; i++) {
		sun4c_context_ring[i].ringhd.next =
			sun4c_context_ring[i].ringhd.prev =
				&sun4c_context_ring[i].ringhd;
		sun4c_context_ring[i].num_entries = 0;
	}
	sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
		&sun4c_ufree_ring.ringhd;
	sun4c_ufree_ring.num_entries = 0;
	sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
		&sun4c_kernel_ring.ringhd;
	sun4c_kernel_ring.num_entries = 0;
	sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
		&sun4c_kfree_ring.ringhd;
	sun4c_kfree_ring.num_entries = 0;
}
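
/* Ring primitives.  add_ring inserts just after the header,
 * add_ring_ordered keeps entries sorted by ascending vaddr (the range
 * flush routines below rely on this ordering), and remove_ring unlinks
 * an entry wherever it sits.
 */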
static inline void add_ring(struct sun4c_mmu_ring *ring,
			    struct sun4c_mmu_entry *entry)
{
	struct sun4c_mmu_entry *head = &ring->ringhd;

	entry->prev = head;
	(entry->next = head->next)->prev = entry;
	head->next = entry;
	ring->num_entries++;
}
static inline void add_ring_ordered(struct sun4c_mmu_ring *ring,
				    struct sun4c_mmu_entry *entry)
{
	struct sun4c_mmu_entry *head = &ring->ringhd;
	unsigned long addr = entry->vaddr;

	if(head->next != &ring->ringhd) {
		while((head->next != &ring->ringhd) && (head->next->vaddr < addr))
			head = head->next;
	}
	entry->prev = head;
	(entry->next = head->next)->prev = entry;
	head->next = entry;
	ring->num_entries++;
}
static inline void remove_ring(struct sun4c_mmu_ring *ring,
			       struct sun4c_mmu_entry *entry)
{
	struct sun4c_mmu_entry *next = entry->next;

	(next->prev = entry->prev)->next = next;
	ring->num_entries--;
#ifdef DEBUG_SUN4C_MM
	if(ring->num_entries < 0)
		panic("sun4c: Ring num_entries < 0!");
#endif
}
static inline void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
{
	remove_ring(sun4c_context_ring+ctx, entry);
	add_ring(&sun4c_ufree_ring, entry);
}

static inline void assign_user_entry(int ctx, struct sun4c_mmu_entry *entry)
{
	remove_ring(&sun4c_ufree_ring, entry);
	add_ring_ordered(sun4c_context_ring+ctx, entry);
}

static inline void free_kernel_entry(struct sun4c_mmu_entry *entry,
				     struct sun4c_mmu_ring *ring)
{
	remove_ring(ring, entry);
	add_ring(&sun4c_kfree_ring, entry);
}
static void __init sun4c_init_fill_kernel_ring(int howmany)
{
	int i;

	while(howmany) {
		for(i=0; i < invalid_segment; i++)
			if(!mmu_entry_pool[i].locked)
				break;
		mmu_entry_pool[i].locked = 1;
		sun4c_init_clean_segmap(i);
		add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
		howmany--;
	}
}

static void __init sun4c_init_fill_user_ring(void)
{
	int i;

	for(i=0; i < invalid_segment; i++) {
		if(mmu_entry_pool[i].locked)
			continue;
		sun4c_init_clean_segmap(i);
		add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
	}
}
static inline void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
{
	int savectx, ctx;

	savectx = sun4c_get_context();
	for(ctx = 0; ctx < num_contexts; ctx++) {
		sun4c_set_context(ctx);
		sun4c_put_segmap(kentry->vaddr, invalid_segment);
	}
	sun4c_set_context(savectx);
}

static inline void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
{
	int savectx, ctx;

	savectx = sun4c_get_context();
	for(ctx = 0; ctx < num_contexts; ctx++) {
		sun4c_set_context(ctx);
		sun4c_put_segmap(kentry->vaddr, kentry->pseg);
	}
	sun4c_set_context(savectx);
}

static inline void sun4c_user_unmap(struct sun4c_mmu_entry *uentry)
{
	sun4c_put_segmap(uentry->vaddr, invalid_segment);
}

static inline void sun4c_user_map(struct sun4c_mmu_entry *uentry)
{
	unsigned long start = uentry->vaddr;
	unsigned long end = start + SUN4C_REAL_PGDIR_SIZE;

	sun4c_put_segmap(uentry->vaddr, uentry->pseg);
	while(start < end) {
		sun4c_put_pte(start, 0);
		start += PAGE_SIZE;
	}
}
static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
	struct sun4c_mmu_entry *head = &crp->ringhd;
	unsigned long flags;

	save_and_cli(flags);
	if(head->next != head) {
		struct sun4c_mmu_entry *entry = head->next;
		int savectx = sun4c_get_context();

		FUW_INLINE
		sun4c_set_context(ctx);
		sun4c_flush_context_hw();
		do {
			struct sun4c_mmu_entry *next = entry->next;

			sun4c_user_unmap(entry);
			free_user_entry(ctx, entry);

			entry = next;
		} while(entry != head);
		sun4c_set_context(savectx);
	}
	restore_flags(flags);
}

static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
	struct sun4c_mmu_entry *head = &crp->ringhd;
	unsigned long flags;

	save_and_cli(flags);
	if(head->next != head) {
		struct sun4c_mmu_entry *entry = head->next;
		int savectx = sun4c_get_context();

		FUW_INLINE
		sun4c_set_context(ctx);
		sun4c_flush_context_sw();
		do {
			struct sun4c_mmu_entry *next = entry->next;

			sun4c_user_unmap(entry);
			free_user_entry(ctx, entry);

			entry = next;
		} while(entry != head);
		sun4c_set_context(savectx);
	}
	restore_flags(flags);
}
static inline void sun4c_demap_one(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
	/* by using .prev we get a kind of "lru" algorithm */
	struct sun4c_mmu_entry *entry = crp->ringhd.prev;
	unsigned long flags;
	int savectx = sun4c_get_context();

#ifdef DEBUG_SUN4C_MM
	if(entry == &crp->ringhd)
		panic("sun4c_demap_one: Freeing from empty ctx ring.");
#endif
	FUW_INLINE
	save_and_cli(flags);
	sun4c_set_context(ctx);
	sun4c_flush_segment(entry->vaddr);
	sun4c_user_unmap(entry);
	free_user_entry(ctx, entry);
	sun4c_set_context(savectx);
	restore_flags(flags);
}
static int sun4c_user_taken_entries = 0;  /* This is how much we have.             */
static int max_user_taken_entries = 0;    /* This limits us and prevents deadlock. */
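
/* Pick a pseg for the kernel: take a free one if any exist, otherwise
 * evict the least recently used kernel entry, flushing and unmapping it
 * first.
 */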
static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
{
	struct sun4c_mmu_entry *this_entry;

	/* If some are free, return first one. */
	if(sun4c_kfree_ring.num_entries) {
		this_entry = sun4c_kfree_ring.ringhd.next;
		return this_entry;
	}

	/* Else free one up. */
	this_entry = sun4c_kernel_ring.ringhd.prev;
	sun4c_flush_segment(this_entry->vaddr);
	sun4c_kernel_unmap(this_entry);
	free_kernel_entry(this_entry, &sun4c_kernel_ring);
	this_entry = sun4c_kfree_ring.ringhd.next;

	return this_entry;
}
void sun4c_shrink_kernel_ring(void)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;

	/* If an interrupt comes in here, we die... */
	save_and_cli(flags);

	if (sun4c_user_taken_entries) {
		entry = sun4c_kernel_strategy();
		remove_ring(&sun4c_kfree_ring, entry);
		add_ring(&sun4c_ufree_ring, entry);
		sun4c_user_taken_entries--;
#if 0
		printk("shrink: ufree= %d, kfree= %d, kernel= %d\n",
		       sun4c_ufree_ring.num_entries,
		       sun4c_kfree_ring.num_entries,
		       sun4c_kernel_ring.num_entries);
#endif
#ifdef DEBUG_SUN4C_MM
		if(sun4c_user_taken_entries < 0)
			panic("sun4c_shrink_kernel_ring: taken < 0.");
#endif
	}
	restore_flags(flags);
}
/* Using this method to free up mmu entries eliminates a lot of
 * potential races since we have a kernel that incurs tlb
 * replacement faults. There may be performance penalties.
 */
static inline struct sun4c_mmu_entry *sun4c_user_strategy(void)
{
	struct ctx_list *next_one;
	struct sun4c_mmu_ring *rp = 0;
	unsigned char ctx;
#ifdef DEBUG_SUN4C_MM
	int lim = num_contexts;
#endif

	/* If some are free, return first one. */
	if(sun4c_ufree_ring.num_entries) {
#ifdef DEBUG_SUN4C_MM
		if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
			panic("sun4c_user_strategy: num_entries!=0 but ring empty.");
#endif
		return sun4c_ufree_ring.ringhd.next;
	}

	if (sun4c_user_taken_entries) {
		sun4c_shrink_kernel_ring();
#ifdef DEBUG_SUN4C_MM
		if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
			panic("sun4c_user_strategy: kernel shrunk but ufree empty.");
#endif
		return sun4c_ufree_ring.ringhd.next;
	}

	/* Grab one from the LRU context. */
	next_one = ctx_used.next;
	while ((sun4c_context_ring[next_one->ctx_number].num_entries == 0)
#ifdef DEBUG_SUN4C_MM
	       && (--lim >= 0)
#endif
	       )
		next_one = next_one->next;

#ifdef DEBUG_SUN4C_MM
	if(lim < 0)
		panic("No user segmaps!");
#endif

	ctx = next_one->ctx_number;
	rp = &sun4c_context_ring[ctx];

	sun4c_demap_one(rp, ctx);
#ifdef DEBUG_SUN4C_MM
	if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
		panic("sun4c_user_strategy: demapped one but ufree empty.");
#endif
	return sun4c_ufree_ring.ringhd.next;
}
void sun4c_grow_kernel_ring(void)
{
	struct sun4c_mmu_entry *entry;

#if 0
	printk("grow: ");
#endif

	/* Prevent deadlock condition. */
	if(sun4c_user_taken_entries >= max_user_taken_entries) {
#if 0
		printk("deadlock avoidance, taken= %d max= %d\n",
		       sun4c_user_taken_entries, max_user_taken_entries);
#endif
		return;
	}

	if (sun4c_ufree_ring.num_entries) {
		entry = sun4c_ufree_ring.ringhd.next;
#ifdef DEBUG_SUN4C_MM
		if(entry == &sun4c_ufree_ring.ringhd)
			panic("\nsun4c_grow_kernel_ring: num_entries!=0, ring empty.");
#endif
		remove_ring(&sun4c_ufree_ring, entry);
		add_ring(&sun4c_kfree_ring, entry);
#ifdef DEBUG_SUN4C_MM
		if(sun4c_user_taken_entries < 0)
			panic("\nsun4c_grow_kernel_ring: taken < 0.");
#endif
		sun4c_user_taken_entries++;
#if 0
		printk("ufree= %d, kfree= %d, kernel= %d\n",
		       sun4c_ufree_ring.num_entries,
		       sun4c_kfree_ring.num_entries,
		       sun4c_kernel_ring.num_entries);
#endif
	}
}
static inline void alloc_user_segment(unsigned long address, unsigned char ctx)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;

	save_and_cli(flags);
	entry = sun4c_user_strategy();
	entry->vaddr = (address & SUN4C_REAL_PGDIR_MASK);
	assign_user_entry(ctx, entry);
	sun4c_user_map(entry);
	restore_flags(flags);
}
/* This is now a fast in-window trap handler to avoid any and all races. */
static void sun4c_quick_kernel_fault(unsigned long address)
{
	printk("Kernel faults at addr 0x%08lx\n", address);
	panic("sun4c kernel fault handler bolixed...");
}
/* 2 page buckets for task struct and kernel stack allocation.
 *
 * TASK_STACK_BEGIN
 * bucket[0]
 * bucket[1]
 * [ ... ]
 * bucket[NR_TASKS-1]
 * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASKS)
 *
 * Each slot looks like:
 *
 *  page 1 --  task struct + beginning of kernel stack
 *  page 2 --  rest of kernel stack
 */
union task_union *sun4c_bucket[NR_TASKS];

static int sun4c_lowbucket_avail;
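
/* Translate between bucket slots, their fixed virtual addresses in the
 * locked area, and the physical pages backing them.  BUCKET_PTE builds
 * a valid kernel pte for a page; BUCKET_PTE_PAGE inverts it.
 */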
#define BUCKET_EMPTY     ((union task_union *) 0)
#define BUCKET_SHIFT     (PAGE_SHIFT + 1)        /* log2(sizeof(struct task_bucket)) */
#define BUCKET_SIZE      (1 << BUCKET_SHIFT)
#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
#define BUCKET_PTE(page) \
	((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
#define BUCKET_PTE_PAGE(pte) \
	(PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
static inline void get_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *stolen;
	unsigned long flags;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	stolen = sun4c_user_strategy();
	remove_ring(&sun4c_ufree_ring, stolen);
	max_user_taken_entries--;
#ifdef DEBUG_SUN4C_MM
	if(max_user_taken_entries < 0)
		panic("get_locked_segment: max_user_taken < 0.");
#endif
	stolen->vaddr = addr;
	FUW_INLINE
	sun4c_kernel_map(stolen);
	restore_flags(flags);
}
static inline void free_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;
	unsigned char pseg;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	pseg = sun4c_get_segmap(addr);
	entry = &mmu_entry_pool[pseg];

	FUW_INLINE
	sun4c_flush_segment(addr);
	sun4c_kernel_unmap(entry);
	add_ring(&sun4c_ufree_ring, entry);
#ifdef DEBUG_SUN4C_MM
	if(max_user_taken_entries < 0)
		panic("free_locked_segment: max_user_taken < 0.");
#endif
	max_user_taken_entries++;
	restore_flags(flags);
}
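
/* One locked segment covers 32 buckets; when every bucket in a segment
 * has been freed, the segment itself is given back to the user pool.
 */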
static inline void garbage_collect(int entry)
{
	int start, end;

	/* 32 buckets per segment... */
	entry &= ~31;
	start = entry;
	for(end = (start + 32); start < end; start++)
		if(sun4c_bucket[start] != BUCKET_EMPTY)
			return;

	/* Entire segment empty, release it. */
	free_locked_segment(BUCKET_ADDR(entry));
}
#ifdef CONFIG_SUN4
#define TASK_STRUCT_ORDER	0
#else
#define TASK_STRUCT_ORDER	1
#endif
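
/* Task structs and kernel stacks are allocated as ordinary pages but
 * mapped at fixed bucket addresses inside the locked area, so their
 * mappings can never be victimized by segmap replacement.
 */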
static struct task_struct *sun4c_alloc_task_struct(void)
{
	unsigned long addr, pages;
	int entry;

	pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER);
	if(!pages)
		return (struct task_struct *) 0;

	for(entry = sun4c_lowbucket_avail; entry < NR_TASKS; entry++)
		if(sun4c_bucket[entry] == BUCKET_EMPTY)
			break;
	if(entry == NR_TASKS) {
		free_pages(pages, TASK_STRUCT_ORDER);
		return (struct task_struct *) 0;
	}
	if(entry >= sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry + 1;

	addr = BUCKET_ADDR(entry);
	sun4c_bucket[entry] = (union task_union *) addr;
	if(sun4c_get_segmap(addr) == invalid_segment)
		get_locked_segment(addr);
	sun4c_put_pte(addr, BUCKET_PTE(pages));
#ifndef CONFIG_SUN4
	sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
#endif
	return (struct task_struct *) addr;
}
static void sun4c_free_task_struct_hw(struct task_struct *tsk)
{
	unsigned long tsaddr = (unsigned long) tsk;
	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
	int entry = BUCKET_NUM(tsaddr);

	/* We are deleting a mapping, so the flush here is mandatory. */
	sun4c_flush_page_hw(tsaddr);
#ifndef CONFIG_SUN4
	sun4c_flush_page_hw(tsaddr + PAGE_SIZE);
#endif
	sun4c_put_pte(tsaddr, 0);
#ifndef CONFIG_SUN4
	sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
	sun4c_bucket[entry] = BUCKET_EMPTY;
	if(entry < sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry;

	free_pages(pages, TASK_STRUCT_ORDER);
	garbage_collect(entry);
}
static void sun4c_free_task_struct_sw(struct task_struct *tsk)
{
	unsigned long tsaddr = (unsigned long) tsk;
	unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
	int entry = BUCKET_NUM(tsaddr);

	/* We are deleting a mapping, so the flush here is mandatory. */
	sun4c_flush_page_sw(tsaddr);
#ifndef CONFIG_SUN4
	sun4c_flush_page_sw(tsaddr + PAGE_SIZE);
#endif
	sun4c_put_pte(tsaddr, 0);
#ifndef CONFIG_SUN4
	sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
	sun4c_bucket[entry] = BUCKET_EMPTY;
	if(entry < sun4c_lowbucket_avail)
		sun4c_lowbucket_avail = entry;

	free_pages(pages, TASK_STRUCT_ORDER);
	garbage_collect(entry);
}
static void __init sun4c_init_buckets(void)
{
	int entry;

	if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
		prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER);
	}
	for(entry = 0; entry < NR_TASKS; entry++)
		sun4c_bucket[entry] = BUCKET_EMPTY;
	sun4c_lowbucket_avail = 0;
}
static unsigned long sun4c_iobuffer_start;
static unsigned long sun4c_iobuffer_end;
static unsigned long sun4c_iobuffer_high;
static unsigned long *sun4c_iobuffer_map;
static int iobuffer_map_size;
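
/* The iobuffer area provides locked, uncached alias mappings for DMA.
 * sun4c_iobuffer_map is a bitmap of which alias pages are in use, and
 * sun4c_iobuffer_high tracks how far the locked segments currently
 * reach so unused ones can be reclaimed.
 */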
/*
 * Alias our pages so they do not cause a trap.
 * Also one page may be aliased into several I/O areas and we may
 * finish these I/O separately.
 */
static char *sun4c_lockarea(char *vaddr, unsigned long size)
{
	unsigned long base, scan;
	unsigned long npages;
	unsigned long vpage;
	unsigned long pte;
	unsigned long apage;
	unsigned long high;
	unsigned long flags;

	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	scan = 0;
	save_and_cli(flags);
	for (;;) {
		scan = find_next_zero_bit(sun4c_iobuffer_map,
					  iobuffer_map_size, scan);
		if ((base = scan) + npages > iobuffer_map_size) goto abend;
		for (;;) {
			if (scan >= base + npages) goto found;
			if (test_bit(scan, sun4c_iobuffer_map)) break;
			scan++;
		}
	}

found:
	high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
	high = SUN4C_REAL_PGDIR_ALIGN(high);
	while (high > sun4c_iobuffer_high) {
		get_locked_segment(sun4c_iobuffer_high);
		sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
	}

	vpage = ((unsigned long) vaddr) & PAGE_MASK;
	for (scan = base; scan < base+npages; scan++) {
		pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
		pte |= pgprot_val(SUN4C_PAGE_KERNEL);
		pte |= _SUN4C_PAGE_NOCACHE;
		set_bit(scan, sun4c_iobuffer_map);
		apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;

		/* Flush original mapping so we see the right things later. */
		sun4c_flush_page(vpage);

		sun4c_put_pte(apage, pte);
		vpage += PAGE_SIZE;
	}
	restore_flags(flags);
	return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
			 (((unsigned long) vaddr) & ~PAGE_MASK));

abend:
	restore_flags(flags);
	printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
	panic("Out of iobuffer table");
	return 0;
}
static void sun4c_unlockarea(char *vaddr, unsigned long size)
{
	unsigned long vpage, npages;
	unsigned long flags;
	int scan, high;

	vpage = (unsigned long)vaddr & PAGE_MASK;
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	save_and_cli(flags);
	while (npages != 0) {
		--npages;

		/* This mapping is marked non-cacheable, no flush necessary. */
		sun4c_put_pte(vpage, 0);
		clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
			  sun4c_iobuffer_map);
		vpage += PAGE_SIZE;
	}

	/* garbage collect */
	scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
	while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
		scan -= 32;
	scan += 32;
	high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
	high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
	while (high < sun4c_iobuffer_high) {
		sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
		free_locked_segment(sun4c_iobuffer_high);
	}
	restore_flags(flags);
}
/* Note the scsi code at init time passes to here buffers
 * which sit on the kernel stack, those are already locked
 * by implication and fool the page locking code above
 * if passed to by mistake.
 */
static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
{
	unsigned long page;

	page = ((unsigned long)bufptr) & PAGE_MASK;
	if(MAP_NR(page) > max_mapnr) {
		sun4c_flush_page(page);
		return (__u32)bufptr; /* already locked */
	}
	return (__u32)sun4c_lockarea(bufptr, len);
}
static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
	while(sz >= 0) {
		sg[sz].dvma_addr = (__u32)sun4c_lockarea(sg[sz].addr, sg[sz].len);
		sz--;
	}
}

static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct linux_sbus *sbus)
{
	if(bufptr < sun4c_iobuffer_start)
		return; /* On kernel stack or similar, see above */
	sun4c_unlockarea((char *)bufptr, len);
}

static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
	while(sz >= 0) {
		sun4c_unlockarea((char *)sg[sz].dvma_addr, sg[sz].len);
		sz--;
	}
}
#define TASK_ENTRY_SIZE  BUCKET_SIZE /* see above */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

struct vm_area_struct sun4c_kstack_vma;
static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
{
	unsigned long sun4c_taskstack_start;
	unsigned long sun4c_taskstack_end;
	int bitmap_size;

	sun4c_init_buckets();
	sun4c_taskstack_start = SUN4C_LOCK_VADDR;
	sun4c_taskstack_end = (sun4c_taskstack_start +
			       (TASK_ENTRY_SIZE * NR_TASKS));
	if(sun4c_taskstack_end >= SUN4C_LOCK_END) {
		prom_printf("Too many tasks, decrease NR_TASKS please.\n");
		prom_halt();
	}

	sun4c_iobuffer_start = sun4c_iobuffer_high =
				SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
	sun4c_iobuffer_end = SUN4C_LOCK_END;
	bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
	bitmap_size = (bitmap_size + 7) >> 3;
	bitmap_size = LONG_ALIGN(bitmap_size);
	iobuffer_map_size = bitmap_size << 3;
	sun4c_iobuffer_map = (unsigned long *) start_mem;
	memset((void *) start_mem, 0, bitmap_size);
	start_mem += bitmap_size;

	sun4c_kstack_vma.vm_mm = &init_mm;
	sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
	sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
	sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
	sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	insert_vm_struct(&init_mm, &sun4c_kstack_vma);
	return start_mem;
}
/* Cache flushing on the sun4c. */
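/* sun4c_flush_cache_all displaces every line of the direct mapped VAC
 * by reading a VAC-sized window of kernel text, one load per cache
 * line, with the loop unrolled for both 16 and 32 byte line sizes.
 */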
static void sun4c_flush_cache_all(void)
{
	unsigned long begin, end;

	FUW_INLINE
	begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
	end = (begin + SUN4C_VAC_SIZE);

	if(sun4c_vacinfo.linesize == 32) {
		while(begin < end) {
			__asm__ __volatile__("
			ld	[%0 + 0x00], %%g0
			ld	[%0 + 0x20], %%g0
			ld	[%0 + 0x40], %%g0
			ld	[%0 + 0x60], %%g0
			ld	[%0 + 0x80], %%g0
			ld	[%0 + 0xa0], %%g0
			ld	[%0 + 0xc0], %%g0
			ld	[%0 + 0xe0], %%g0
			ld	[%0 + 0x100], %%g0
			ld	[%0 + 0x120], %%g0
			ld	[%0 + 0x140], %%g0
			ld	[%0 + 0x160], %%g0
			ld	[%0 + 0x180], %%g0
			ld	[%0 + 0x1a0], %%g0
			ld	[%0 + 0x1c0], %%g0
			ld	[%0 + 0x1e0], %%g0
			" : : "r" (begin));
			begin += 512;
		}
	} else {
		while(begin < end) {
			__asm__ __volatile__("
			ld	[%0 + 0x00], %%g0
			ld	[%0 + 0x10], %%g0
			ld	[%0 + 0x20], %%g0
			ld	[%0 + 0x30], %%g0
			ld	[%0 + 0x40], %%g0
			ld	[%0 + 0x50], %%g0
			ld	[%0 + 0x60], %%g0
			ld	[%0 + 0x70], %%g0
			ld	[%0 + 0x80], %%g0
			ld	[%0 + 0x90], %%g0
			ld	[%0 + 0xa0], %%g0
			ld	[%0 + 0xb0], %%g0
			ld	[%0 + 0xc0], %%g0
			ld	[%0 + 0xd0], %%g0
			ld	[%0 + 0xe0], %%g0
			ld	[%0 + 0xf0], %%g0
			" : : "r" (begin));
			begin += 256;
		}
	}
}
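
/* From here on most operations come in _hw and _sw pairs; at init time
 * the mmu method table is pointed at whichever set matches the
 * machine's hardware flush capability, so the do_hwflushes test is not
 * paid on every flush.
 */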
static void sun4c_flush_cache_mm_hw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if(head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			FUW_INLINE
			sun4c_set_context(new_ctx);
			sun4c_flush_context_hw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while(entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

#if KGPROF_PROFILING
	kgprof_profile();
#endif
	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		FUW_INLINE
		save_and_cli(flags);

		/* All user segmap chains are ordered on entry->vaddr. */
		for(entry = head->next;
		    (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		    entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();
			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if(end < realend)
					realend = end;
				if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;
					while(page < realend) {
						sun4c_flush_page_hw(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment_hw(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}
/* XXX no save_and_cli/restore_flags needed, but put here if darkside still crashes */
static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if(new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();

		FUW_INLINE
		sun4c_set_context(new_ctx);
		sun4c_flush_page_hw(page);
		sun4c_set_context(octx);
	}
}

static void sun4c_flush_page_to_ram_hw(unsigned long page)
{
	sun4c_flush_page_hw(page);
}
static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if(head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			FUW_INLINE
			sun4c_set_context(new_ctx);
			sun4c_flush_context_sw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while(entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

#if KGPROF_PROFILING
	kgprof_profile();
#endif
	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

		FUW_INLINE
		save_and_cli(flags);
		/* All user segmap chains are ordered on entry->vaddr. */
		for(entry = head->next;
		    (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		    entry = entry->next)
			;

		/* Tracing various job mixtures showed that this conditional
		 * only passes ~35% of the time for most worse case situations,
		 * therefore we avoid all of this gross overhead ~65% of the time.
		 */
		if((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();
			sun4c_set_context(new_ctx);

			/* At this point, always, (start >= entry->vaddr) and
			 * (entry->vaddr < end), once the latter condition
			 * ceases to hold, or we hit the end of the list, we
			 * exit the loop.  The ordering of all user allocated
			 * segmaps makes this all work out so beautifully.
			 */
			do {
				struct sun4c_mmu_entry *next = entry->next;
				unsigned long realend;

				/* "realstart" is always >= entry->vaddr */
				realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
				if(end < realend)
					realend = end;
				if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
					unsigned long page = entry->vaddr;
					while(page < realend) {
						sun4c_flush_page_sw(page);
						page += PAGE_SIZE;
					}
				} else {
					sun4c_flush_segment_sw(entry->vaddr);
					sun4c_user_unmap(entry);
					free_user_entry(new_ctx, entry);
				}
				entry = next;
			} while((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	/* Sun4c has no separate I/D caches so cannot optimize for non
	 * text page flushes.
	 */
	if(new_ctx != NO_CONTEXT) {
		int octx = sun4c_get_context();

		FUW_INLINE
		sun4c_set_context(new_ctx);
		sun4c_flush_page_sw(page);
		sun4c_set_context(octx);
	}
}

static void sun4c_flush_page_to_ram_sw(unsigned long page)
{
	sun4c_flush_page_sw(page);
}
/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* TLB flushing on the sun4c.  These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */
static void sun4c_flush_tlb_all(void)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	unsigned long flags;
	int savectx, ctx;

	save_and_cli(flags);
	this_entry = sun4c_kernel_ring.ringhd.next;
	savectx = sun4c_get_context();
	flush_user_windows();
	while (sun4c_kernel_ring.num_entries) {
		next_entry = this_entry->next;
		sun4c_flush_segment(this_entry->vaddr);
		for(ctx = 0; ctx < num_contexts; ctx++) {
			sun4c_set_context(ctx);
			sun4c_put_segmap(this_entry->vaddr, invalid_segment);
		}
		free_kernel_entry(this_entry, &sun4c_kernel_ring);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
	restore_flags(flags);
}
static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if(head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			FUW_INLINE
			sun4c_set_context(new_ctx);
			sun4c_flush_context_hw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while(entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;
#if KGPROF_PROFILING
		kgprof_profile();
#endif

		save_and_cli(flags);
		/* See commentary in sun4c_flush_cache_range_*(). */
		for(entry = head->next;
		    (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		    entry = entry->next)
			;

		if((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			/* This window flush is paranoid I think... -DaveM */
			FUW_INLINE
			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment_hw(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT) {
		int savectx = sun4c_get_context();

		FUW_INLINE
		sun4c_set_context(new_ctx);
		page &= PAGE_MASK;
		sun4c_flush_page_hw(page);
		sun4c_put_pte(page, 0);
		sun4c_set_context(savectx);
	}
}
static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		unsigned long flags;

		save_and_cli(flags);
		if(head->next != head) {
			struct sun4c_mmu_entry *entry = head->next;
			int savectx = sun4c_get_context();

			FUW_INLINE
			sun4c_set_context(new_ctx);
			sun4c_flush_context_sw();
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while(entry != head);
			sun4c_set_context(savectx);
		}
		restore_flags(flags);
	}
}
static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int new_ctx = mm->context;

	if(new_ctx != NO_CONTEXT) {
		struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
		struct sun4c_mmu_entry *entry;
		unsigned long flags;

#if KGPROF_PROFILING
		kgprof_profile();
#endif

		save_and_cli(flags);
		/* See commentary in sun4c_flush_cache_range_*(). */
		for(entry = head->next;
		    (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
		    entry = entry->next)
			;

		if((entry != head) && (entry->vaddr < end)) {
			int octx = sun4c_get_context();

			/* This window flush is paranoid I think... -DaveM */
			FUW_INLINE
			sun4c_set_context(new_ctx);
			do {
				struct sun4c_mmu_entry *next = entry->next;

				sun4c_flush_segment_sw(entry->vaddr);
				sun4c_user_unmap(entry);
				free_user_entry(new_ctx, entry);

				entry = next;
			} while((entry != head) && (entry->vaddr < end));
			sun4c_set_context(octx);
		}
		restore_flags(flags);
	}
}
2052 static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long page)
2053 {
2054 struct mm_struct *mm = vma->vm_mm;
2055 int new_ctx = mm->context;
2057 if(new_ctx != NO_CONTEXT) {
2058 int savectx = sun4c_get_context();
2060 FUW_INLINE
2061 sun4c_set_context(new_ctx);
2062 page &= PAGE_MASK;
2063 sun4c_flush_page_sw(page);
2064 sun4c_put_pte(page, 0);
2065 sun4c_set_context(savectx);
2066 }
2067 }
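/* [Editorial note: the _hw and _sw routines above are line-for-line
 * identical except for the flush primitive they call. Only one family
 * is ever used; ld_mmu_sun4c() at the bottom of this file patches in
 * the _hw or _sw set at boot, via BTFIXUP, depending on whether
 * sun4c_vacinfo.do_hwflushes reports hardware-assisted flushing.]
 */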
2069 static void sun4c_set_pte(pte_t *ptep, pte_t pte)
2070 {
2071 *ptep = pte;
2072 }
2074 static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
2075 {
2076 }
2079 void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
2080 int bus_type, int rdonly)
2081 {
2082 unsigned long page_entry;
2084 page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
2085 page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
2086 if(rdonly)
2087 page_entry &= ~_SUN4C_WRITEABLE;
2088 sun4c_put_pte(virt_addr, page_entry);
2089 }
2091 void sun4c_unmapioaddr(unsigned long virt_addr)
2092 {
2093 sun4c_put_pte(virt_addr, 0);
2094 }
2096 static void sun4c_alloc_context_hw(struct mm_struct *mm)
2097 {
2098 struct ctx_list *ctxp;
2100 ctxp = ctx_free.next;
2101 if(ctxp != &ctx_free) {
2102 remove_from_ctx_list(ctxp);
2103 add_to_used_ctxlist(ctxp);
2104 mm->context = ctxp->ctx_number;
2105 ctxp->ctx_mm = mm;
2106 return;
2107 }
2108 ctxp = ctx_used.next;
2109 if(ctxp->ctx_mm == current->mm)
2110 ctxp = ctxp->next;
2111 #ifdef DEBUG_SUN4C_MM
2112 if(ctxp == &ctx_used)
2113 panic("out of mmu contexts");
2114 #endif
2115 remove_from_ctx_list(ctxp);
2116 add_to_used_ctxlist(ctxp);
2117 ctxp->ctx_mm->context = NO_CONTEXT;
2118 ctxp->ctx_mm = mm;
2119 mm->context = ctxp->ctx_number;
2120 sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number],
2121 ctxp->ctx_number);
2122 }
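/* [Editorial sketch with hypothetical names: allocation above is "take
 * a free context if one exists, else steal the least recently used one,
 * skipping the caller's own mm". The steal decision in miniature:]
 */
#if 0
struct model_ctx { int owner; struct model_ctx *next; };

static struct model_ctx *model_steal(struct model_ctx *used_head,
				     int self, int newcomer)
{
	struct model_ctx *victim = used_head->next; /* LRU end of the list */

	if (victim->owner == self)	/* never steal from ourselves */
		victim = victim->next;
	victim->owner = newcomer;	/* old owner drops to NO_CONTEXT */
	return victim;
}
#endif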
2124 static void sun4c_switch_to_context_hw(struct task_struct *tsk)
2125 {
2126 struct ctx_list *ctx;
2128 if(tsk->mm->context == NO_CONTEXT) {
2129 sun4c_alloc_context_hw(tsk->mm);
2130 } else {
2131 /* Update the LRU ring of contexts. */
2132 ctx = ctx_list_pool + tsk->mm->context;
2133 remove_from_ctx_list(ctx);
2134 add_to_used_ctxlist(ctx);
2135 }
2136 sun4c_set_context(tsk->mm->context);
2137 }
2139 static void sun4c_init_new_context_hw(struct mm_struct *mm)
2140 {
2141 sun4c_alloc_context_hw(mm);
2142 if(mm == current->mm)
2143 sun4c_set_context(mm->context);
2144 }
2146 static void sun4c_destroy_context_hw(struct mm_struct *mm)
2147 {
2148 struct ctx_list *ctx_old;
2150 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2151 sun4c_demap_context_hw(&sun4c_context_ring[mm->context], mm->context);
2152 ctx_old = ctx_list_pool + mm->context;
2153 remove_from_ctx_list(ctx_old);
2154 add_to_free_ctxlist(ctx_old);
2155 mm->context = NO_CONTEXT;
2156 }
2157 }
2159 static void sun4c_alloc_context_sw(struct mm_struct *mm)
2160 {
2161 struct ctx_list *ctxp;
2163 ctxp = ctx_free.next;
2164 if(ctxp != &ctx_free) {
2165 remove_from_ctx_list(ctxp);
2166 add_to_used_ctxlist(ctxp);
2167 mm->context = ctxp->ctx_number;
2168 ctxp->ctx_mm = mm;
2169 return;
2170 }
2171 ctxp = ctx_used.next;
2172 if(ctxp->ctx_mm == current->mm)
2173 ctxp = ctxp->next;
2174 #ifdef DEBUG_SUN4C_MM
2175 if(ctxp == &ctx_used)
2176 panic("out of mmu contexts");
2177 #endif
2178 remove_from_ctx_list(ctxp);
2179 add_to_used_ctxlist(ctxp);
2180 ctxp->ctx_mm->context = NO_CONTEXT;
2181 ctxp->ctx_mm = mm;
2182 mm->context = ctxp->ctx_number;
2183 sun4c_demap_context_sw(&sun4c_context_ring[ctxp->ctx_number],
2184 ctxp->ctx_number);
2185 }
2187 static void sun4c_switch_to_context_sw(struct task_struct *tsk)
2188 {
2189 struct ctx_list *ctx;
2191 if(tsk->mm->context == NO_CONTEXT) {
2192 sun4c_alloc_context_sw(tsk->mm);
2193 } else {
2194 /* Update the LRU ring of contexts. */
2195 ctx = ctx_list_pool + tsk->mm->context;
2196 remove_from_ctx_list(ctx);
2197 add_to_used_ctxlist(ctx);
2198 }
2199 sun4c_set_context(tsk->mm->context);
2200 }
2202 static void sun4c_init_new_context_sw(struct mm_struct *mm)
2203 {
2204 sun4c_alloc_context_sw(mm);
2205 if(mm == current->mm)
2206 sun4c_set_context(mm->context);
2207 }
2209 static void sun4c_destroy_context_sw(struct mm_struct *mm)
2210 {
2211 struct ctx_list *ctx_old;
2213 if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
2214 sun4c_demap_context_sw(&sun4c_context_ring[mm->context], mm->context);
2215 ctx_old = ctx_list_pool + mm->context;
2216 remove_from_ctx_list(ctx_old);
2217 add_to_free_ctxlist(ctx_old);
2218 mm->context = NO_CONTEXT;
2219 }
2220 }
2222 static int sun4c_mmu_info(char *buf)
2223 {
2224 int used_user_entries, i;
2225 int len;
2227 used_user_entries = 0;
2228 for(i=0; i < num_contexts; i++)
2229 used_user_entries += sun4c_context_ring[i].num_entries;
2231 len = sprintf(buf,
2232 "vacsize\t\t: %d bytes\n"
2233 "vachwflush\t: %s\n"
2234 "vaclinesize\t: %d bytes\n"
2235 "mmuctxs\t\t: %d\n"
2236 "mmupsegs\t: %d\n"
2237 "kernelpsegs\t: %d\n"
2238 "kfreepsegs\t: %d\n"
2239 "usedpsegs\t: %d\n"
2240 "ufreepsegs\t: %d\n"
2241 "user_taken\t: %d\n"
2242 "max_taken\t: %d\n"
2243 "context\t\t: %d flushes\n"
2244 "segment\t\t: %d flushes\n"
2245 "page\t\t: %d flushes\n",
2246 sun4c_vacinfo.num_bytes,
2247 (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
2248 sun4c_vacinfo.linesize,
2249 num_contexts,
2250 (invalid_segment + 1),
2251 sun4c_kernel_ring.num_entries,
2252 sun4c_kfree_ring.num_entries,
2253 used_user_entries,
2254 sun4c_ufree_ring.num_entries,
2255 sun4c_user_taken_entries,
2256 max_user_taken_entries,
2257 ctxflushes, segflushes, pageflushes);
2259 #if KGPROF_PROFILING
2260 {
2261 int i,j;
2262 len += sprintf(buf + len,"kgprof profiling:\n");
2263 for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
2264 len += sprintf(buf + len,"%5d ",kgprof_counters[i].count);
2265 for (j=0;j<KGPROF_DEPTH;j++) {
2266 len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]);
2267 }
2268 len += sprintf(buf + len,"\n");
2269 }
2270 }
2271 #endif
2273 return len;
2274 }
2276 /* Nothing below here should touch the mmu hardware or the mmu_entry
2277 * data structures.
2278 */
2280 #if 0 /* Not used due to BTFIXUPs */
2281 static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
2282 #endif
2283 #if 0 /* Not used due to BTFIXUPs */
2284 static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
2285 #endif
2287 /* First the functions which the mid-level code uses to directly
2288 * manipulate the software page tables. Some defines since we are
2289 * emulating the i386 page directory layout.
2290 */
2291 #define PGD_PRESENT 0x001
2292 #define PGD_RW 0x002
2293 #define PGD_USER 0x004
2294 #define PGD_ACCESSED 0x020
2295 #define PGD_DIRTY 0x040
2296 #define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
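/* [Editorial note: PGD_TABLE evaluates to 0x067, the same value as the
 * i386 _PAGE_TABLE constant (present | rw | user | accessed | dirty)
 * that this emulated layout mirrors.]
 */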
2298 #if 0 /* Not used due to BTFIXUPs */
2299 static unsigned long sun4c_vmalloc_start(void)
2300 {
2301 return SUN4C_VMALLOC_START;
2302 }
2303 #endif
2305 #if 0 /* Not used due to BTFIXUPs */
2306 static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
2307 #endif
2309 static int sun4c_pte_present(pte_t pte)
2310 {
2311 return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
2312 }
2313 static void sun4c_pte_clear(pte_t *ptep) { *ptep = __pte(0); }
2315 static int sun4c_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
2316 static int sun4c_pmd_bad(pmd_t pmd)
2317 {
2318 return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
2319 (MAP_NR(pmd_val(pmd)) > max_mapnr));
2320 }
2322 static int sun4c_pmd_present(pmd_t pmd)
2323 {
2324 return ((pmd_val(pmd) & PGD_PRESENT) != 0);
2325 }
2326 static void sun4c_pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); }
2328 static int sun4c_pgd_none(pgd_t pgd) { return 0; }
2329 static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
2330 static int sun4c_pgd_present(pgd_t pgd) { return 1; }
2331 static void sun4c_pgd_clear(pgd_t * pgdp) { }
2333 /*
2334 * The following only work if pte_present() is true.
2335 * Undefined behaviour if not.
2336 */
2337 #if 0 /* Not used due to BTFIXUPs */
2338 static int sun4c_pte_write(pte_t pte)
2339 {
2340 return pte_val(pte) & _SUN4C_PAGE_WRITE;
2341 }
2342 #endif
2344 #if 0 /* Not used due to BTFIXUPs */
2345 static int sun4c_pte_dirty(pte_t pte)
2346 {
2347 return pte_val(pte) & _SUN4C_PAGE_MODIFIED;
2348 }
2349 #endif
2351 #if 0 /* Not used due to BTFIXUPs */
2352 static int sun4c_pte_young(pte_t pte)
2353 {
2354 return pte_val(pte) & _SUN4C_PAGE_ACCESSED;
2355 }
2356 #endif
2358 #if 0 /* Not used due to BTFIXUPs */
2359 static pte_t sun4c_pte_wrprotect(pte_t pte)
2360 {
2361 return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE));
2362 }
2363 #endif
2365 #if 0 /* Not used due to BTFIXUPs */
2366 static pte_t sun4c_pte_mkclean(pte_t pte)
2367 {
2368 return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE));
2369 }
2370 #endif
2372 #if 0 /* Not used due to BTFIXUPs */
2373 static pte_t sun4c_pte_mkold(pte_t pte)
2374 {
2375 return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ));
2376 }
2377 #endif
2379 static pte_t sun4c_pte_mkwrite(pte_t pte)
2380 {
2381 pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
2382 if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
2383 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
2384 return pte;
2385 }
2387 static pte_t sun4c_pte_mkdirty(pte_t pte)
2388 {
2389 pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
2390 if (pte_val(pte) & _SUN4C_PAGE_WRITE)
2391 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
2392 return pte;
2393 }
2395 static pte_t sun4c_pte_mkyoung(pte_t pte)
2396 {
2397 pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
2398 if (pte_val(pte) & _SUN4C_PAGE_READ)
2399 pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
2400 return pte;
2401 }
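/* [Editorial sketch; an inference from the mk*() pattern above: sun4c
 * tracks referenced/modified bits in software. The _SUN4C_PAGE_SILENT_*
 * bits are the ones the hardware actually honours, and they are only
 * switched on once both the permission bit and the software status bit
 * are set, so the first access still faults and the handler can record
 * it. Distilled with hypothetical masks:]
 */
#if 0
#define MODEL_SOFT_WRITE	0x1	/* software "may write" bit */
#define MODEL_SOFT_DIRTY	0x2	/* software "was written" bit */
#define MODEL_HW_WRITE		0x4	/* bit the MMU really checks */

static unsigned long model_mkdirty(unsigned long pte)
{
	pte |= MODEL_SOFT_DIRTY;
	if (pte & MODEL_SOFT_WRITE)
		pte |= MODEL_HW_WRITE;	/* writes stop faulting from here on */
	return pte;
}
#endif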
2403 /*
2404 * Conversion functions: convert a page and protection to a page entry,
2405 * and a page entry and page directory to the page they refer to.
2406 */
2407 static pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot)
2408 {
2409 return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
2410 }
2412 static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
2413 {
2414 return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
2415 }
2417 static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
2418 {
2419 return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
2420 }
2422 #if 0 /* Not used due to BTFIXUPs */
2423 static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
2424 {
2425 return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) |
2426 pgprot_val(newprot));
2427 }
2428 #endif
2430 static unsigned long sun4c_pte_page(pte_t pte)
2431 {
2432 return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT)));
2433 }
2435 static inline unsigned long sun4c_pmd_page(pmd_t pmd)
2436 {
2437 return (pmd_val(pmd) & PAGE_MASK);
2438 }
2440 static unsigned long sun4c_pgd_page(pgd_t pgd)
2441 {
2442 return 0;
2443 }
2445 /* to find an entry in a page-table-directory */
2446 pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
2447 {
2448 return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
2449 }
2451 /* Find an entry in the second-level page table.. */
2452 static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
2453 {
2454 return (pmd_t *) dir;
2455 }
2457 /* Find an entry in the third-level page table.. */
2458 pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
2459 {
2460 return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
2461 }
2463 /* Update the root mmu directory. */
2464 static void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
2465 {
2466 }
2468 /* Please take special note of the foo_kernel() routines below; our
2469 * fast in-window fault handler wants to get at the ptes for the vmalloc
2470 * area with traps off, therefore they _MUST_ be locked down to prevent
2471 * a watchdog reset. It only takes 4 pages of ptes to lock down the
2472 * maximum vmalloc space possible on sun4c, so we statically allocate
2473 * these page table pieces in the kernel image. Therefore we should
2474 * never have to really allocate or free any kernel page table
2475 * information.
2476 */
2478 /* Allocate and free page tables. The xxx_kernel() versions are
2479 * used to allocate a kernel page table - this turns on ASN bits
2480 * if any, and marks the page tables reserved.
2481 */
2482 static void sun4c_pte_free_kernel(pte_t *pte)
2483 {
2484 /* This should never get called. */
2485 panic("sun4c_pte_free_kernel called, can't happen...");
2486 }
2488 static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
2489 {
2490 if(address >= SUN4C_LOCK_VADDR)
2491 return NULL;
2492 address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
2493 if (sun4c_pmd_none(*pmd))
2494 panic("sun4c_pmd_none for kernel pmd, can't happen...");
2495 if (sun4c_pmd_bad(*pmd)) {
2496 printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
2497 *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
2498 return NULL;
2499 }
2500 return (pte_t *) sun4c_pmd_page(*pmd) + address;
2501 }
2503 static void sun4c_free_pte_slow(pte_t *pte)
2504 {
2505 free_page((unsigned long)pte);
2506 }
2508 static void sun4c_free_pgd_slow(pgd_t *pgd)
2509 {
2510 free_page((unsigned long)pgd);
2511 }
2513 /*
2514 * allocating and freeing a pmd is trivial: the 1-entry pmd is
2515 * inside the pgd, so it has no extra memory associated with it.
2516 */
2517 static void sun4c_pmd_free_kernel(pmd_t *pmd)
2518 {
2519 }
2521 static pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
2522 {
2523 return (pmd_t *) pgd;
2524 }
2526 extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
2527 {
2528 unsigned long *ret;
2530 if((ret = pgd_quicklist) != NULL) {
2531 pgd_quicklist = (unsigned long *)(*ret);
2532 ret[0] = ret[1];
2533 pgtable_cache_size--;
2534 } else {
2535 pgd_t *init;
2537 ret = (unsigned long *)__get_free_page(GFP_KERNEL);
2538 memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
2539 init = pgd_offset(&init_mm, 0);
2540 memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
2541 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
2542 }
2543 return (pgd_t *)ret;
2544 }
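/* [Editorial sketch: the quicklists used above are LIFO freelists
 * threaded through the first word of each cached page, so a pop is one
 * load and a push one store; 'ret[0] = ret[1]' then repairs the slot
 * that held the freelist link by copying the adjacent slot over it.
 * A stand-alone model with hypothetical names:]
 */
#if 0
static unsigned long *model_quicklist;

static void model_push(unsigned long *page)
{
	*page = (unsigned long) model_quicklist; /* link stored in the page */
	model_quicklist = page;
}

static unsigned long *model_pop(void)
{
	unsigned long *page = model_quicklist;

	if (page)
		model_quicklist = (unsigned long *) *page;
	return page;	/* NULL when the cache is empty */
}
#endif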
2546 static int sun4c_check_pgt_cache(int low, int high)
2547 {
2548 int freed = 0;
2549 if(pgtable_cache_size > high) {
2550 do {
2551 if(pgd_quicklist)
2552 free_pgd_slow(get_pgd_fast()), freed++;
2553 if(pmd_quicklist)
2554 free_pmd_slow(get_pmd_fast()), freed++;
2555 if(pte_quicklist)
2556 free_pte_slow(get_pte_fast()), freed++;
2557 } while(pgtable_cache_size > low);
2558 }
2559 return freed;
2560 }
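/* [Editorial note: the low/high pair above gives the trimming loop
 * hysteresis; nothing is freed until the cache grows past 'high', and
 * it is then shrunk all the way back to 'low' rather than by one page,
 * so the quicklists are not trimmed on every call.]
 */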
2562 static void sun4c_set_pgdir(unsigned long address, pgd_t entry)
2563 {
2564 /* Nothing to do */
2565 }
2567 extern __inline__ void sun4c_free_pgd_fast(pgd_t *pgd)
2568 {
2569 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
2570 pgd_quicklist = (unsigned long *) pgd;
2571 pgtable_cache_size++;
2572 }
2574 extern __inline__ pte_t *sun4c_get_pte_fast(void)
2575 {
2576 unsigned long *ret;
2578 if((ret = (unsigned long *)pte_quicklist) != NULL) {
2579 pte_quicklist = (unsigned long *)(*ret);
2580 ret[0] = ret[1];
2581 pgtable_cache_size--;
2582 }
2583 return (pte_t *)ret;
2584 }
2586 extern __inline__ void sun4c_free_pte_fast(pte_t *pte)
2587 {
2588 *(unsigned long *)pte = (unsigned long) pte_quicklist;
2589 pte_quicklist = (unsigned long *) pte;
2590 pgtable_cache_size++;
2591 }
2593 static void sun4c_pte_free(pte_t *pte)
2594 {
2595 sun4c_free_pte_fast(pte);
2596 }
2598 static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
2599 {
2600 address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
2601 if (sun4c_pmd_none(*pmd)) {
2602 pte_t *page = (pte_t *) sun4c_get_pte_fast();
2604 if (page) {
2605 *pmd = __pmd(PGD_TABLE | (unsigned long) page);
2606 return page + address;
2607 }
2608 page = (pte_t *) get_free_page(GFP_KERNEL);
2609 if (sun4c_pmd_none(*pmd)) {
2610 if (page) {
2611 *pmd = __pmd(PGD_TABLE | (unsigned long) page);
2612 return page + address;
2613 }
2614 *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
2615 return NULL;
2616 }
2617 free_page((unsigned long) page);
2618 }
2619 if (sun4c_pmd_bad(*pmd)) {
2620 printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
2621 *pmd = __pmd(PGD_TABLE | (unsigned long) BAD_PAGETABLE);
2622 return NULL;
2623 }
2624 return (pte_t *) sun4c_pmd_page(*pmd) + address;
2625 }
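/* [Editorial note: the second sun4c_pmd_none() test in sun4c_pte_alloc()
 * above is a deliberate re-check. get_free_page(GFP_KERNEL) may sleep,
 * so another context can install a page table meanwhile; the loser then
 * frees its fresh page instead of clobbering the winner's pmd entry.]
 */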
2627 static pte_t *sun4c_pte_get(void)
2628 {
2629 return sun4c_get_pte_fast();
2630 }
2632 /*
2633 * allocating and freeing a pmd is trivial: the 1-entry pmd is
2634 * inside the pgd, so it has no extra memory associated with it.
2635 */
2636 static void sun4c_pmd_free(pmd_t * pmd)
2637 {
2638 }
2640 static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
2641 {
2642 return (pmd_t *) pgd;
2643 }
2645 static void sun4c_pgd_free(pgd_t *pgd)
2646 {
2647 sun4c_free_pgd_fast(pgd);
2648 }
2650 static pgd_t *sun4c_pgd_alloc(void)
2651 {
2652 return sun4c_get_pgd_fast();
2653 }
2655 /* There are really two cases of aliases to watch out for, and these
2656 * are:
2657 *
2658 * 1) A user's page which can be aliased with the kernel's virtual
2659 * mapping of the physical page.
2660 *
2661 * 2) Multiple user mappings of the same inode/anonymous object
2662 * such that two copies of the same data for the same phys page
2663 * can live (writable) in the cache at the same time.
2664 *
2665 * We handle number 1 by always flushing the kernel copy of the page
2666 * after COW page operations.
2667 *
2668 * NOTE: We are a bit slowed down now because the VMA arg is indeed
2669 * used, so our quick user-fault ref/mod bit tracking eats a few more
2670 * cycles than it used to.
2671 */
2672 static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long address, pte_t pte)
2673 {
2674 struct dentry *dentry = NULL;
2675 struct inode *inode = NULL;
2676 pgd_t *pgdp;
2677 pte_t *ptep;
2679 if (vma->vm_file)
2680 dentry = vma->vm_file->f_dentry;
2681 if(dentry)
2682 inode = dentry->d_inode;
2683 if(inode) {
2684 unsigned long offset = (address & PAGE_MASK) - vma->vm_start;
2685 struct vm_area_struct *vmaring;
2686 int alias_found = 0;
2687 spin_lock(&inode->i_shared_lock);
2688 vmaring = inode->i_mmap;
2689 do {
2690 unsigned long vaddr = vmaring->vm_start + offset;
2691 unsigned long start;
2693 /* Do not mistake ourselves for another mapping. */
2694 if(vmaring == vma)
2695 continue;
2697 if (S4CVAC_BADALIAS(vaddr, address)) {
2698 alias_found++;
2699 start = vmaring->vm_start;
2700 while(start < vmaring->vm_end) {
2701 pgdp = sun4c_pgd_offset(vmaring->vm_mm, start);
2702 if(!pgdp) goto next;
2703 ptep = sun4c_pte_offset((pmd_t *) pgdp, start);
2704 if(!ptep) goto next;
2706 if(pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
2707 flush_cache_page(vmaring, start);
2708 *ptep = __pte(pte_val(*ptep) |
2709 _SUN4C_PAGE_NOCACHE);
2710 flush_tlb_page(vmaring, start);
2711 }
2712 next:
2713 start += PAGE_SIZE;
2714 }
2715 }
2716 } while ((vmaring = vmaring->vm_next_share) != NULL);
2717 spin_unlock(&inode->i_shared_lock);
2719 if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
2720 pgdp = sun4c_pgd_offset(vma->vm_mm, address);
2721 ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
2722 *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
2723 pte = pte_val(*ptep);
2724 }
2725 }
2726 }
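/* [Editorial sketch, not the original S4CVAC_BADALIAS() definition: on
 * a virtually indexed, direct-mapped cache, two mappings of one
 * physical page alias harmfully when they differ in the cache-index
 * bits above the page offset. Assuming a 64KB VAC and 4KB pages, the
 * test amounts to:]
 */
#if 0
static int model_bad_alias(unsigned long vaddr1, unsigned long vaddr2)
{
	return ((vaddr1 ^ vaddr2) & (65536UL - 1) & ~(4096UL - 1)) != 0;
}
#endif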
2728 void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
2729 {
2730 unsigned long flags;
2732 save_and_cli(flags);
2733 address &= PAGE_MASK;
2734 if(sun4c_get_segmap(address) == invalid_segment)
2735 alloc_user_segment(address, sun4c_get_context());
2737 if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
2738 sun4c_vac_alias_fixup(vma, address, pte);
2740 sun4c_put_pte(address, pte_val(pte));
2741 restore_flags(flags);
2742 }
2744 extern unsigned long free_area_init(unsigned long, unsigned long);
2745 extern unsigned long sparc_context_init(unsigned long, int);
2746 extern unsigned long end;
2748 unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
2749 {
2750 int i, cnt;
2751 unsigned long kernel_end, vaddr;
2752 extern unsigned long sparc_iobase_vaddr;
2754 kernel_end = (unsigned long) &end;
2755 kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
2756 kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
2757 sun4c_probe_mmu();
2758 invalid_segment = (num_segmaps - 1);
2759 sun4c_init_mmu_entry_pool();
2760 sun4c_init_rings(&start_mem);
2761 sun4c_init_map_kernelprom(kernel_end);
2762 sun4c_init_clean_mmu(kernel_end);
2763 sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
2764 sun4c_init_lock_area(sparc_iobase_vaddr, IOBASE_END);
2765 sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
2766 start_mem = sun4c_init_lock_areas(start_mem);
2767 sun4c_init_fill_user_ring();
2769 sun4c_set_context(0);
2770 memset(swapper_pg_dir, 0, PAGE_SIZE);
2771 memset(pg0, 0, PAGE_SIZE);
2772 memset(pg1, 0, PAGE_SIZE);
2773 memset(pg2, 0, PAGE_SIZE);
2774 memset(pg3, 0, PAGE_SIZE);
2776 /* Save work later. */
2777 vaddr = SUN4C_VMALLOC_START;
2778 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
2779 vaddr += SUN4C_PGDIR_SIZE;
2780 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
2781 vaddr += SUN4C_PGDIR_SIZE;
2782 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
2783 vaddr += SUN4C_PGDIR_SIZE;
2784 swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
2785 sun4c_init_ss2_cache_bug();
2786 start_mem = PAGE_ALIGN(start_mem);
2787 start_mem = sparc_context_init(start_mem, num_contexts);
2788 start_mem = free_area_init(start_mem, end_mem);
2789 cnt = 0;
2790 for(i = 0; i < num_segmaps; i++)
2791 if(mmu_entry_pool[i].locked)
2792 cnt++;
2794 max_user_taken_entries = num_segmaps - cnt - 40 - 1;
2796 printk("SUN4C: %d mmu entries for the kernel\n", cnt);
2797 return start_mem;
2798 }
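/* [Editorial note, an inference from the code above: invalid_segment
 * is num_segmaps - 1 and 'cnt' counts the segmaps locked during boot,
 * so the formula leaves the kernel a further reserve of 40 psegs, plus
 * the invalid segment, beyond what is already locked.]
 */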
2800 /* Load up routines and constants for sun4c mmu */
2801 void __init ld_mmu_sun4c(void)
2802 {
2803 extern void ___xchg32_sun4c(void);
2805 printk("Loading sun4c MMU routines\n");
2807 /* First the constants */
2808 BTFIXUPSET_SIMM13(pmd_shift, SUN4C_PMD_SHIFT);
2809 BTFIXUPSET_SETHI(pmd_size, SUN4C_PMD_SIZE);
2810 BTFIXUPSET_SETHI(pmd_mask, SUN4C_PMD_MASK);
2811 BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
2812 BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
2813 BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);
2815 BTFIXUPSET_SIMM13(ptrs_per_pte, SUN4C_PTRS_PER_PTE);
2816 BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
2817 BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
2818 BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);
2820 BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
2821 BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED));
2822 BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
2823 BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
2824 BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
2825 pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
2826 _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;
2828 /* Functions */
2829 #ifndef __SMP__
2830 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
2831 #endif
2832 BTFIXUPSET_CALL(get_pte_fast, sun4c_pte_get, BTFIXUPCALL_NORM);
2833 BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
2834 BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM);
2835 BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM);
2836 BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
2838 BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP);
2840 BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
2842 if(sun4c_vacinfo.do_hwflushes) {
2843 BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM);
2844 BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM);
2845 BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM);
2846 BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_hw, BTFIXUPCALL_NORM);
2847 BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_hw, BTFIXUPCALL_NORM);
2848 BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM);
2849 BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM);
2850 BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM);
2851 BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM);
2852 BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM);
2853 BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM);
2854 } else {
2855 BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM);
2856 BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM);
2857 BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_sw, BTFIXUPCALL_NORM);
2858 BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_sw, BTFIXUPCALL_NORM);
2859 BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_sw, BTFIXUPCALL_NORM);
2860 BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM);
2861 BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM);
2862 BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM);
2863 BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM);
2864 BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM);
2865 BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM);
2866 }
2868 BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
2870 BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);
2872 BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
2874 BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM);
2875 #if PAGE_SHIFT <= 12
2876 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
2877 #else
2878 BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
2879 #endif
2881 BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP);
2883 BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
2884 BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
2886 BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
2887 BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
2888 BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);
2890 BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
2891 BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
2892 BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
2893 BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);
2895 BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
2896 BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
2897 BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);
2899 BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
2900 BTFIXUPSET_CALL(pgd_offset, sun4c_pgd_offset, BTFIXUPCALL_NORM);
2901 BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
2902 BTFIXUPSET_CALL(pte_offset, sun4c_pte_offset, BTFIXUPCALL_NORM);
2903 BTFIXUPSET_CALL(pte_free_kernel, sun4c_pte_free_kernel, BTFIXUPCALL_NORM);
2904 BTFIXUPSET_CALL(pmd_free_kernel, sun4c_pmd_free_kernel, BTFIXUPCALL_NOP);
2905 BTFIXUPSET_CALL(pte_alloc_kernel, sun4c_pte_alloc_kernel, BTFIXUPCALL_NORM);
2906 BTFIXUPSET_CALL(pmd_alloc_kernel, sun4c_pmd_alloc_kernel, BTFIXUPCALL_RETO0);
2907 BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
2908 BTFIXUPSET_CALL(pte_alloc, sun4c_pte_alloc, BTFIXUPCALL_NORM);
2909 BTFIXUPSET_CALL(pmd_free, sun4c_pmd_free, BTFIXUPCALL_NOP);
2910 BTFIXUPSET_CALL(pmd_alloc, sun4c_pmd_alloc, BTFIXUPCALL_RETO0);
2911 BTFIXUPSET_CALL(pgd_free, sun4c_pgd_free, BTFIXUPCALL_NORM);
2912 BTFIXUPSET_CALL(pgd_alloc, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
2914 BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
2915 BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
2916 BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
2917 BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
2918 BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
2919 BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
2920 BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
2921 BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
2922 BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
2923 BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);
2925 BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
2926 BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);
2928 BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
2929 BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
2930 BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
2931 BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
2933 BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
2935 BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM);
2936 BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM);
2938 /* Task struct and kernel stack allocating/freeing. */
2939 BTFIXUPSET_CALL(alloc_task_struct, sun4c_alloc_task_struct, BTFIXUPCALL_NORM);
2941 BTFIXUPSET_CALL(quick_kernel_fault, sun4c_quick_kernel_fault, BTFIXUPCALL_NORM);
2942 BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
2944 /* These should _never_ get called with two-level page tables. */
2945 BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
2946 BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0);