2 #include <aros/debug.h>
3 #include <proto/exec.h>
5 #include "kernel_base.h"
6 #include "kernel_intern.h"
8 /* 68030 (68851), 68040 and 68060 supported, 68030 (68851) is configured like a 68040,
9  * no 68030 special features used, not worth the extra complexity */
/* Page size expressed as a shift count: pages are 1 << PAGE_SIZE = 4096 bytes. */
14 #define PAGE_SIZE 12 // = 1 << 12 = 4096
16 /* Macros that hopefully make MMU magic a bit easier to understand.. */
/* Extract the level A/B/C table index from a virtual address.  The three
 * index fields sit below each other at the top of the address; the remaining
 * low PAGE_SIZE bits are the page offset.  LEVELA_SIZE/LEVELB_SIZE/LEVELC_SIZE
 * are defined elsewhere (not visible in this chunk) -- per the 68040-style
 * "7/7/6" comment in enable_mmu030() below they are presumably 7, 7 and 6;
 * TODO confirm. */
18 #define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE ))) & ((1 << LEVELA_SIZE) - 1))
19 #define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE ))) & ((1 << LEVELB_SIZE) - 1))
20 #define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))
/* Index into a descriptor table.  For levels B and C the incoming descriptor
 * carries status flags in its low bits, so the table pointer is recovered by
 * masking down to the table's natural alignment ((1 << (bits + 2)) bytes,
 * i.e. table size in bytes) before indexing. */
22 #define LEVELA(root, x) (root[LEVELA_VAL(x)])
23 #define LEVELB(a, x) (((ULONG*)(((ULONG)a) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
24 #define LEVELC(b, x) (((ULONG*)(((ULONG)b) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])
/* Filler value for unmapped entries.  A descriptor whose low 2 bits are 00
 * is invalid on both 68030 and 68040/060, which is what ISINVALID() tests;
 * the 0xDEAD pattern just makes stale entries obvious in dumps. */
26 #define INVALID_DESCRIPTOR 0xDEAD0000
27 #define ISINVALID(x) ((((ULONG)x) & 3) == 0)
/* Forward declaration: core worker that (un)maps a page-aligned region.
 * Used by map_pagetable() before its definition below; map_region() and
 * unmap_region() are thin logging wrappers around it. */
29 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
);
32 static void map_pagetable(struct KernelBase
*kb
, void *addr
, ULONG size
)
34 /* 68040+ MMU tables should be serialized */
35 map_region2(kb
, addr
, NULL
, size
, FALSE
, FALSE
, FALSE
, CM_SERIALIZED
);
38 /* Allocate MMU descriptor page, it needs to be (1 << bits) * sizeof(ULONG) aligned */
39 static ULONG
alloc_descriptor(struct KernelBase
*kb
, UBYTE mmutype
, UBYTE bits
, UBYTE level
)
41 struct PlatformData
*pd
= kb
->kb_PlatformData
;
43 ULONG size
= sizeof(ULONG
) * (1 << bits
);
44 ULONG ps
= 1 << PAGE_SIZE
;
47 while (pd
->page_free
>= size
&& (((ULONG
)pd
->page_ptr
) & (size
- 1))) {
48 pd
->page_ptr
+= 0x100;
49 pd
->page_free
-= 0x100;
51 while (pd
->page_free
< size
) {
52 /* allocate in aligned blocks of PAGE_SIZE */
53 UBYTE
*mem
, *newmem
, *pagemem
;
55 mem
= AllocMem(2 * ps
, MEMF_PUBLIC
);
60 newmem
= (UBYTE
*)((((ULONG
)mem
) + ps
- 1) & ~(ps
- 1));
61 pagemem
= AllocAbs(ps
, newmem
);
65 pd
->page_ptr
= pagemem
;
67 // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
68 if (level
> 0 && mmutype
>= MMU040
)
69 map_pagetable(kb
, pagemem
, ps
);
71 desc
= (ULONG
*)pd
->page_ptr
;
72 for (i
= 0; i
< (1 << bits
); i
++)
73 desc
[i
] = INVALID_DESCRIPTOR
;
75 if (mmutype
== MMU030
)
76 dout
|= 2; /* Valid 4 byte descriptor */
78 dout
|= 3; /* Resident descriptor */
79 // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
81 pd
->page_free
-= size
;
85 BOOL
init_mmu(struct KernelBase
*kb
)
87 UBYTE mmutype
= kb
->kb_PlatformData
->mmu_type
;
91 kb
->kb_PlatformData
->MMU_Level_A
= (ULONG
*)(alloc_descriptor(kb
, mmutype
, LEVELA_SIZE
, 0) & ~3);
92 if (!kb
->kb_PlatformData
->MMU_Level_A
) {
93 kb
->kb_PlatformData
->mmu_type
= 0;
96 if (mmutype
>= MMU040
)
97 map_pagetable(kb
, kb
->kb_PlatformData
->MMU_Level_A
, 1 << PAGE_SIZE
);
/* Enable the 68030 (or 68851) MMU with 'levela' as the root table.
 * Runs in supervisor mode with interrupts off; loads CRP with a valid
 * 4-byte-descriptor root, sets the TC register for 68040-compatible
 * 7/7/6-bit levels with 4K pages, installs the bus error vector and
 * clears both transparent translation registers.
 * NOTE(review): large parts of this asm block (Supervisor() entry,
 * TC/TT scratch setup, epilogue) are missing from this chunk; do not
 * edit the remaining fragment without the full source. */
101 static void enable_mmu030(ULONG
*levela
)
107 "lea .esuper030(%%pc),%%a5\n"
111 /* Do not interrupt us */
114 /* Disable MMU, setup root pointers,
115 * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
116 "move.l #0x00c07760,%%d1\n"
117 "move.l %%d1,%%a7@\n"
119 /* Set bus error exception vector */
121 "move.l #buserror030,%%a5@(8)\n"
122 /* Configure CRP. Valid 4 byte descriptor, other features disabled. */
123 "move.l #0x80000002,%%a7@\n"
124 /* First level descriptor pointer */
125 "move.l %%d0,%%a7@(4)\n"
127 "pmove %%a7@,%%crp\n"
128 /* Set MMU enabled bit */
130 "move.l %%d1,%%a7@\n"
133 /* Clear transparent translation */
135 "pmove %%a7@,%%tt0\n"
136 "pmove %%a7@,%%tt1\n"
140 : : "m" (levela
) : "d0", "d1", "a5", "a6");
/* Turn the 68030 MMU off again (clears the enable bit in TC).
 * NOTE(review): the body of this asm block is almost entirely missing
 * from this chunk -- only the supervisor-entry label and clobbers
 * remain; restore from version control. */
142 static void disable_mmu030(void)
147 "lea .dsuper030(%%pc),%%a5\n"
151 /* Do not interrupt us */
160 : : : "d0", "d1", "a5", "a6");
/* Enable the 68040/68060 MMU with 'levela' as the SRP/URP root.
 * 'cpu060' selects the 68060 bus error handler over the 68040 one;
 * 'zeropagedescriptor' is stashed in the vector-table area (253*4)
 * for the zero-page trap handler.  Flushes caches/ATC, enables the
 * MMU with 4K pages and disables all transparent translation.
 * NOTE(review): chunks of this asm (supervisor entry, root pointer
 * movec's, cpush/pflusha, TC write, epilogue) are missing here; do
 * not edit the fragment without the full source. */
162 static void enable_mmu040(ULONG
*levela
, UBYTE cpu060
, UBYTE
*zeropagedescriptor
)
170 "lea .esuper040(%%pc),%%a5\n"
174 /* Do not interrupt us */
177 "move.l %%a1,253*4(%%a5)\n"
178 "lea buserror040,%%a6\n"
181 "lea buserror060,%%a6\n"
183 "move.l %%a6,%%a5@(8)\n"
185 /* Disable MMU, setup root pointers */
189 /* Flush data caches and ATC */
193 /* Enable MMU, 4K page size */
194 "move.l #0x00008000,%%d0\n"
196 /* Disable transparent translation */
197 "movec %%d1,%%itt0\n"
198 "movec %%d1,%%itt1\n"
199 "movec %%d1,%%dtt0\n"
200 "movec %%d1,%%dtt1\n"
203 : : "m" (levela
), "m" (cpu060
), "m" (zeropagedescriptor
) : "d0", "d1", "a1", "a5", "a6");
/* Turn the 68040/060 MMU off again (clears TC enable, flushes ATC).
 * NOTE(review): the body of this asm block is almost entirely missing
 * from this chunk -- only the supervisor-entry label and clobbers
 * remain; restore from version control. */
206 static void disable_mmu040(void)
211 "lea .dsuper040(%%pc),%%a5\n"
215 /* Do not interrupt us */
223 : : : "d0", "d1", "a5", "a6");
226 void enable_mmu(struct KernelBase
*kb
)
228 if (!kb
->kb_PlatformData
->mmu_type
)
230 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
231 enable_mmu030(kb
->kb_PlatformData
->MMU_Level_A
);
233 enable_mmu040(kb
->kb_PlatformData
->MMU_Level_A
, kb
->kb_PlatformData
->mmu_type
== MMU060
, kb
->kb_PlatformData
->zeropagedescriptor
);
235 void disable_mmu(struct KernelBase
*kb
)
237 if (!kb
->kb_PlatformData
->mmu_type
)
239 if (kb
->kb_PlatformData
->mmu_type
== MMU030
)
245 static ULONG
getdesc(struct KernelBase
*kb
, ULONG addr
)
249 desc
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
252 desc
= LEVELB(desc
, addr
);
255 desc
= LEVELC(desc
, addr
);
/* Dump the whole 4GB translation map to the debug log, coalescing runs
 * of pages whose descriptor flag bits (everything below the page frame
 * address) are identical.  The loop runs to i == totalpages inclusive
 * so the final run is flushed.
 * NOTE(review): this chunk is missing lines (declarations of mmutype,
 * i, totalpages, startaddr, odesc, desc, cm, sp; the early return; and
 * the run-tracking updates of startaddr/odesc) -- restore from VCS. */
259 void debug_mmu(struct KernelBase
*kb
)
266 ULONG pagemask
= (1 << PAGE_SIZE
) - 1;
268 mmutype
= kb
->kb_PlatformData
->mmu_type
;
/* Nothing to dump without an MMU or without a root table. */
269 if (!mmutype
|| kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
271 bug("MMU dump start. Root = %p\n", kb
->kb_PlatformData
->MMU_Level_A
);
272 totalpages
= 1 << (32 - PAGE_SIZE
);
274 odesc
= getdesc(kb
, startaddr
);
275 for (i
= 0; i
<= totalpages
; i
++) {
276 ULONG addr
= i
<< PAGE_SIZE
;
279 desc
= getdesc(kb
, addr
);
/* Flag bits changed (or final iteration): print the finished run. */
280 if ((desc
& pagemask
) != (odesc
& pagemask
) || i
== totalpages
) {
/* 68030: single cache-inhibit bit at bit 6; 68040/060: 2-bit
 * cache mode at bits 5-6 plus supervisor bit at bit 7. */
282 if (mmutype
== MMU030
) {
283 cm
= (odesc
>> 6) & 1;
286 cm
= (odesc
>> 5) & 3;
287 sp
= (odesc
>> 7) & 1;
289 bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
290 startaddr
, addr
- 1, odesc
& ~((1 << PAGE_SIZE
) - 1),
291 (odesc
& 4) ? 1 : 0, sp
, cm
, odesc
);
296 bug("MMU dump end\n");
/* Core mapping worker: (re)write level C page descriptors for the
 * page-aligned region [addr, addr+size).  physaddr == NULL means
 * identity mapping; 'invalid' unmaps, 'writeprotect'/'supervisor'/
 * 'cachemode' set the corresponding descriptor bits.  Intermediate
 * level B/C tables are allocated on demand.  Returns success/failure.
 * NOTE(review): this chunk is missing lines (the mmutype declaration,
 * the per-page loop frame with addr/size updates, the error returns
 * after failed checks/allocations, and the closing cache-flush /
 * return) -- restore the dropped lines from version control. */
299 static BOOL
map_region2(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
301 struct PlatformData
*pd
= kb
->kb_PlatformData
;
302 ULONG desca
, descb
, descc
, pagedescriptor
;
303 ULONG page_size
= 1 << PAGE_SIZE
;
304 ULONG page_mask
= page_size
- 1;
307 mmutype
= pd
->mmu_type
;
/* No translation tree yet -> cannot map anything. */
310 if (kb
->kb_PlatformData
->MMU_Level_A
== NULL
)
/* All three of size, addr and physaddr must be page aligned. */
313 if ((size
& page_mask
) || (((ULONG
)addr
) & page_mask
) || (((ULONG
)physaddr
) & page_mask
)) {
314 bug("unaligned MMU page request! %p (%p) %08x\n", addr
, physaddr
, size
);
/* NULL physaddr -> identity-map (physaddr = addr). */
317 if (physaddr
== NULL
)
/* Walk/extend the tree: allocate level B and level C tables on
 * first touch.  alloc_descriptor() returns an "invalid" value
 * (low bits 00) on failure, caught by the second ISINVALID test. */
321 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
);
322 if (ISINVALID(desca
))
323 desca
= LEVELA(kb
->kb_PlatformData
->MMU_Level_A
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELB_SIZE
, 1);
324 if (ISINVALID(desca
))
326 descb
= LEVELB(desca
, addr
);
327 if (ISINVALID(descb
))
328 descb
= LEVELB(desca
, addr
) = alloc_descriptor(kb
, mmutype
, LEVELC_SIZE
, 2);
329 if (ISINVALID(descb
))
331 descc
= LEVELC(descb
, addr
);
/* Build the new page descriptor; stays INVALID_DESCRIPTOR when
 * unmapping ('invalid' requests). */
334 pagedescriptor
= INVALID_DESCRIPTOR
;
335 if (addr
== 0 && size
== page_size
) {
336 /* special case zero page handling */
/* Remember the byte address of the zero page descriptor's low
 * status byte (+3) for the zero-page trap machinery; map the
 * zero page write-protected and serialized/non-cached. */
337 pd
->zeropagedescriptor
= (UBYTE
*)(& LEVELC(descb
, addr
)) + 3;
338 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
339 if (mmutype
== MMU030
) {
341 pagedescriptor
|= 1 << 6;
343 pagedescriptor
|= 4; // write-protected
344 pagedescriptor
|= CM_SERIALIZED
<< 5;
/* Normal pages: preserve stricter attributes already present in
 * an existing (valid) descriptor when re-mapping. */
348 BOOL wasinvalid
= ISINVALID(descc
);
349 pagedescriptor
= ((ULONG
)physaddr
) & ~page_mask
;
350 if (mmutype
== MMU030
) {
351 pagedescriptor
|= 1; // page descriptor
352 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
353 pagedescriptor
|= 4; // write-protected
354 /* 68030 can only enable or disable caching */
355 if (cachemode
>= CM_SERIALIZED
|| (!wasinvalid
&& (descc
& (1 << 6))))
356 pagedescriptor
|= 1 << 6;
358 pagedescriptor
|= 3; // resident page
359 if (writeprotect
|| (!wasinvalid
&& (descc
& 4)))
360 pagedescriptor
|= 4; // write-protected
361 if (supervisor
|| (!wasinvalid
&& (descc
& (1 << 7))))
362 pagedescriptor
|= 1 << 7;
363 // do not override non-cached
364 if (wasinvalid
|| cachemode
> ((descc
>> 5) & 3))
365 pagedescriptor
|= cachemode
<< 5;
367 pagedescriptor
|= ((descc
>> 5) & 3) << 5;
368 if (addr
!= 0 || size
!= page_size
)
369 pagedescriptor
|= 1 << 10; // global if not zero page
/* Commit the descriptor and advance to the next page. */
373 LEVELC(descb
, addr
) = pagedescriptor
;
376 physaddr
+= page_size
;
382 BOOL
map_region(struct KernelBase
*kb
, void *addr
, void *physaddr
, ULONG size
, BOOL invalid
, BOOL writeprotect
, BOOL supervisor
, UBYTE cachemode
)
384 bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d cm=%d\n",
385 addr
, physaddr
, size
, invalid
? 1 : 0, writeprotect
? 1 : 0, supervisor
? 1 : 0, cachemode
);
386 return map_region2(kb
, addr
, physaddr
, size
, invalid
, writeprotect
, supervisor
, cachemode
);
389 BOOL
unmap_region(struct KernelBase
*kb
, void *addr
, ULONG size
)
391 bug("unmap_region(%p, %08x)\n", addr
, size
);
392 return map_region2(kb
, addr
, NULL
, size
, TRUE
, FALSE
, FALSE
, 0);