2 #include <aros/debug.h>
3 #include <proto/exec.h>
5 #include "kernel_base.h"
6 #include "kernel_intern.h"
/* 68030 (68851), 68040 and 68060 supported, 68030 (68851) is configured like a 68040,
 * no 68030 special features used, not worth the extra complexity */

/* log2 of the MMU page size: 1 << 12 = 4096 bytes */
#define PAGE_SIZE 12

/* Three-level 68040-style translation table: 7-bit level A, 7-bit level B
 * and 6-bit level C indexes ("7/7/6, 4K page size", as the 68030 setup
 * code documents). Guarded in case the originals survive elsewhere. */
#ifndef LEVELA_SIZE
#define LEVELA_SIZE 7
#endif
#ifndef LEVELB_SIZE
#define LEVELB_SIZE 7
#endif
#ifndef LEVELC_SIZE
#define LEVELC_SIZE 6
#endif

/* Macros that hopefully make MMU magic a bit easier to understand.. */

/* Table index of virtual address x at each level (top 7/7/6 address bits). */
#define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE))) & ((1 << LEVELA_SIZE) - 1))
#define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE))) & ((1 << LEVELB_SIZE) - 1))
#define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))

/* Descriptor slot for address x. Upper-level descriptors carry status bits
 * in their low bits, so the table pointer is masked to its natural
 * (1 << (bits + 2))-byte alignment before indexing. */
#define LEVELA(root, x) ((root)[LEVELA_VAL(x)])
#define LEVELB(a, x) (((ULONG*)(((ULONG)(a)) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
#define LEVELC(b, x) (((ULONG*)(((ULONG)(b)) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])

/* Pattern written into unused descriptor slots; any descriptor whose low
 * two bits are zero is treated as invalid. */
#define INVALID_DESCRIPTOR 0xDEAD0000
#define ISINVALID(x) ((((ULONG)(x)) & 3) == 0)
29 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
);
32 static void map_pagetable(struct KernelBase
*kb
, void *addr
, ULONG size
)
34 /* 68040+ MMU tables should be serialized */
35 map_region2(kb
, addr
, NULL
, size
, FALSE
, FALSE
, FALSE
, CM_SERIALIZED
);
38 /* Allocate MMU descriptor page, it needs to be (1 << bits) * sizeof(ULONG) aligned */
39 static ULONG
alloc_descriptor(struct KernelBase
*kb
, UBYTE mmutype
, UBYTE bits
, UBYTE level
)
41 struct PlatformData
*pd
= kb
->kb_PlatformData
;
43 ULONG size
= sizeof(ULONG
) * (1 << bits
);
44 ULONG ps
= 1 << PAGE_SIZE
;
47 while (pd
->page_free
>= size
&& (((ULONG
)pd
->page_ptr
) & (size
- 1))) {
48 pd
->page_ptr
+= 0x100;
49 pd
->page_free
-= 0x100;
51 while (pd
->page_free
< size
) {
52 /* allocate in aligned blocks of PAGE_SIZE */
53 UBYTE
*mem
, *newmem
, *pagemem
;
55 mem
= AllocMem(2 * ps
, MEMF_PUBLIC
);
60 newmem
= (UBYTE
*)((((ULONG
)mem
) + ps
- 1) & ~(ps
- 1));
61 pagemem
= AllocAbs(ps
, newmem
);
65 pd
->page_ptr
= pagemem
;
67 // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
68 if (level
> 0 && mmutype
>= MMU040
)
69 map_pagetable(kb
, pagemem
, ps
);
71 desc
= (ULONG
*)pd
->page_ptr
;
72 for (i
= 0; i
< (1 << bits
); i
++)
73 desc
[i
] = INVALID_DESCRIPTOR
;
75 if (mmutype
== MMU030
)
76 dout
|= 2; /* Valid 4 byte descriptor */
78 dout
|= 3; /* Resident descriptor */
79 // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
81 pd
->page_free
-= size
;
85 BOOL
init_mmu(struct KernelBase
*kb
)
87 UBYTE mmutype
= kb
->kb_PlatformData
->mmu_type
;
91 kb
->kb_PlatformData
->MMU_Level_A
= (ULONG
*)(alloc_descriptor(kb
, mmutype
, LEVELA_SIZE
, 0) & ~3);
92 if (!kb
->kb_PlatformData
->MMU_Level_A
) {
93 kb
->kb_PlatformData
->mmu_type
= 0;
96 if (mmutype
>= MMU040
)
97 map_pagetable(kb
, kb
->kb_PlatformData
->MMU_Level_A
, 1 << PAGE_SIZE
);
/*
 * enable_mmu030: switch on the 68030/68851 MMU from supervisor mode.
 * Visible steps: load the TC template 0x00c07760 (68040-style 7/7/6
 * levels, 4K pages, enable bit clear), install the bus/address error
 * vectors, point CRP at the level-A table ("levela"), set the TC enable
 * bit, and clear both transparent-translation registers (tt0/tt1).
 * NOTE(review): this function is truncated in this copy of the file — the
 * asm statement opener, the Supervisor() trampoline and several
 * instructions are missing. The surviving lines are left byte-identical
 * rather than reconstructed; restore from repository history.
 */
101 static void enable_mmu030(ULONG
*levela
)
107 "lea .esuper030(%%pc),%%a5\n"
111 /* Do not interrupt us */
114 /* Disable MMU, setup root pointers,
115 * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
116 "move.l #0x00c07760,%%d1\n"
117 "move.l %%d1,%%a7@\n"
119 /* Set bus error exception vector */
121 "move.l #addrerror030,%%a5@(12)\n"
122 "move.l #buserror030,%%a5@(8)\n"
123 /* Configure CRP. Valid 4 byte descriptor, other features disabled. */
124 "move.l #0x80000002,%%a7@\n"
125 /* First level descriptor pointer */
126 "move.l %%d0,%%a7@(4)\n"
128 "pmove %%a7@,%%crp\n"
129 /* Set MMU enabled bit */
131 "move.l %%d1,%%a7@\n"
134 /* Clear transparent translation */
136 "pmove %%a7@,%%tt0\n"
137 "pmove %%a7@,%%tt1\n"
141 : : "m" (levela
) : "d0", "d1", "a5", "a6");
/*
 * disable_mmu030: turn the 68030/68851 MMU back off from supervisor mode;
 * counterpart of enable_mmu030().
 * NOTE(review): truncated in this copy — only the supervisor trampoline
 * label, one comment and the clobber list survive; left byte-identical.
 */
143 static void disable_mmu030(void)
148 "lea .dsuper030(%%pc),%%a5\n"
152 /* Do not interrupt us */
161 : : : "d0", "d1", "a5", "a6");
/*
 * enable_mmu040: switch on the 68040/68060 MMU from supervisor mode.
 * Visible steps: store a pointer (presumably "zeropagedescriptor",
 * arriving in %a1 — TODO confirm) at vector slot 253*4, install the 040-
 * or 060-specific bus/address error handlers depending on "cpu060",
 * enable the MMU with TC = 0x00008000 (4K page size) and clear all four
 * transparent-translation registers (itt0/itt1/dtt0/dtt1).
 * NOTE(review): truncated in this copy — the asm opener, the branch
 * between the 040/060 handler blocks, the root-pointer setup and the
 * cache/ATC flush sequence are missing; surviving lines left
 * byte-identical rather than reconstructed.
 */
163 static void enable_mmu040(ULONG
*levela
, UBYTE cpu060
, UBYTE
*zeropagedescriptor
)
171 "lea .esuper040(%%pc),%%a5\n"
175 /* Do not interrupt us */
178 "move.l %%a1,253*4(%%a5)\n"
179 "lea buserror040,%%a6\n"
180 "lea addrerror040,%%a0\n"
183 "lea buserror060,%%a6\n"
184 "lea addrerror060,%%a0\n"
186 "move.l %%a6,%%a5@(8)\n"
187 "move.l %%a0,%%a5@(12)\n"
189 /* Disable MMU, setup root pointers */
193 /* Flush data caches and ATC */
197 /* Enable MMU, 4K page size */
198 "move.l #0x00008000,%%d0\n"
200 /* Disable transparent translation */
201 "movec %%d1,%%itt0\n"
202 "movec %%d1,%%itt1\n"
203 "movec %%d1,%%dtt0\n"
204 "movec %%d1,%%dtt1\n"
207 : : "m" (levela
), "m" (cpu060
), "m" (zeropagedescriptor
) : "d0", "d1", "a1", "a5", "a6");
/*
 * disable_mmu040: turn the 68040/68060 MMU back off from supervisor mode;
 * counterpart of enable_mmu040().
 * NOTE(review): truncated in this copy — only the supervisor trampoline
 * label, one comment and the clobber list survive; left byte-identical.
 */
210 static void disable_mmu040(void)
215 "lea .dsuper040(%%pc),%%a5\n"
219 /* Do not interrupt us */
227 : : : "d0", "d1", "a5", "a6");
230 void enable_mmu(struct KernelBase
*kb
)
232 if (!kb
->kb_PlatformData
->mmu_type
)
234 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
235 enable_mmu030(kb
->kb_PlatformData
->MMU_Level_A
);
237 enable_mmu040(kb
->kb_PlatformData
->MMU_Level_A
, kb
->kb_PlatformData
->mmu_type
== MMU060
, kb
->kb_PlatformData
->zeropagedescriptor
);
239 void disable_mmu(struct KernelBase
*kb
)
241 if (!kb
->kb_PlatformData
->mmu_type
)
243 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
250 static ULONG
getdesc(struct KernelBase
*kb
, ULONG addr
)
254 desc
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
257 desc
= LEVELB(desc
, addr
);
260 desc
= LEVELC(desc
, addr
);
265 void debug_mmu(struct KernelBase
*kb
)
273 ULONG pagemask
= (1 << PAGE_SIZE
) - 1;
275 mmutype
= kb
->kb_PlatformData
->mmu_type
;
276 if (!mmutype
|| kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
278 bug("MMU dump start. Root = %p\n", kb
->kb_PlatformData
->MMU_Level_A
);
279 totalpages
= 1 << (32 - PAGE_SIZE
);
281 odesc
= getdesc(kb
, startaddr
);
282 for (i
= 0; i
<= totalpages
; i
++) {
283 ULONG addr
= i
<< PAGE_SIZE
;
286 desc
= getdesc(kb
, addr
);
287 if ((desc
& pagemask
) != (odesc
& pagemask
) || i
== totalpages
) {
289 if (mmutype
== MMU030
) {
290 cm
= (odesc
>> 6) & 1;
293 cm
= (odesc
>> 5) & 3;
294 sp
= (odesc
>> 7) & 1;
296 bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
297 startaddr
, addr
- 1, odesc
& ~((1 << PAGE_SIZE
) - 1),
298 (odesc
& 4) ? 1 : 0, sp
, cm
, odesc
);
303 bug("MMU dump end\n");
307 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
309 struct PlatformData
*pd
= kb
->kb_PlatformData
;
310 ULONG desca
, descb
, descc
, pagedescriptor
;
311 ULONG page_size
= 1 << PAGE_SIZE
;
312 ULONG page_mask
= page_size
- 1;
315 mmutype
= pd
->mmu_type
;
318 if (kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
321 if ((size
& page_mask
) || (((ULONG
)addr
) & page_mask
) || (((ULONG
)physaddr
) & page_mask
)) {
322 D(bug("unaligned MMU page request! %p (%p) %08x\n", addr
, physaddr
, size
));
325 if (physaddr
== NULL
)
329 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
330 if (ISINVALID(desca
))
331 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELB_SIZE
, 1);
332 if (ISINVALID(desca
))
334 descb
= LEVELB(desca
, addr
);
335 if (ISINVALID(descb
))
336 descb
= LEVELB(desca
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELC_SIZE
, 2);
337 if (ISINVALID(descb
))
339 descc
= LEVELC(descb
, addr
);
341 if (addr
== 0 && pd
->zeropagedescriptor
== NULL
) {
342 /* special case zero page handling */
343 pd
->zeropagedescriptor
= (UBYTE
*)(& LEVELC(descb
, addr
)) + 3;
347 pagedescriptor
= INVALID_DESCRIPTOR
;
348 if (addr
== 0 && size
== page_size
) {
349 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
350 if (mmutype
== MMU030
) {
352 pagedescriptor
|= 1 << 6;
354 pagedescriptor
|= 4; // write-protected
355 pagedescriptor
|= CM_SERIALIZED
<< 5;
359 BOOL wasinvalid
= ISINVALID(descc
);
360 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
361 if (mmutype
== MMU030
) {
362 pagedescriptor
|= 1; // page descriptor
363 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
364 pagedescriptor
|= 4; // write-protected
365 /* 68030 can only enable or disable caching */
366 if (cachemode
>= CM_SERIALIZED
|| (!wasinvalid
&& (descc
& (1 << 6))))
367 pagedescriptor
|= 1 << 6;
369 pagedescriptor
|= 3; // resident page
370 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
371 pagedescriptor
|= 4; // write-protected
372 if (supervisor
|| (!wasinvalid
&& (descc
& (1 << 7))))
373 pagedescriptor
|= 1 << 7;
374 // do not override non-cached
375 if (wasinvalid
|| cachemode
> ((descc
>> 5) & 3))
376 pagedescriptor
|= cachemode
<< 5;
378 pagedescriptor
|= ((descc
>> 5) & 3) << 5;
379 if (addr
!= 0 || size
!= page_size
)
380 pagedescriptor
|= 1 << 10; // global if not zero page
384 LEVELC(descb
, addr
) = pagedescriptor
;
387 physaddr
+= page_size
;
393 BOOL
map_region(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
395 D(bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d cm=%d\n",
396 addr
, physaddr
, size
, invalid
? 1 : 0, writeprotect
? 1 : 0, supervisor
? 1 : 0, cachemode
));
397 return map_region2(kb
, addr
, physaddr
, size
, invalid
, writeprotect
, supervisor
, cachemode
);
400 BOOL
unmap_region(struct KernelBase
*kb
, void *addr
, ULONG size
)
402 D(bug("unmap_region(%p, %08x)\n", addr
, size
));
403 return map_region2(kb
, addr
, NULL
, size
, TRUE
, FALSE
, FALSE
, 0);