/* arch/m68k-all/kernel/mmu.c */

#include <aros/debug.h>
#include <proto/exec.h>

#include "kernel_base.h"
#include "kernel_intern.h"

/* 68030 (68851), 68040 and 68060 are supported. The 68030 (68851) is
 * configured like a 68040; no 68030-specific features are used, as they are
 * not worth the extra complexity. */

#define LEVELA_SIZE 7
#define LEVELB_SIZE 7
#define LEVELC_SIZE 6
#define PAGE_SIZE 12 // = 1 << 12 = 4096
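
/*
 * A 32-bit virtual address is split 7 + 7 + 6 + 12: LEVELA_SIZE bits index
 * the 128-entry root table, LEVELB_SIZE bits the 128-entry pointer tables,
 * LEVELC_SIZE bits the 64-entry page tables, and the low PAGE_SIZE bits are
 * the offset inside a 4K page (7 + 7 + 6 + 12 = 32).
 */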

/* Macros that hopefully make the MMU magic a bit easier to understand. */

#define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE ))) & ((1 << LEVELA_SIZE) - 1))
#define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE ))) & ((1 << LEVELB_SIZE) - 1))
#define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))

#define LEVELA(root, x) (root[LEVELA_VAL(x)])
#define LEVELB(a, x) (((ULONG*)(((ULONG)a) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
#define LEVELC(b, x) (((ULONG*)(((ULONG)b) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])
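
/* A level B or C table is naturally aligned to its own size ((1 << SIZE) * 4
 * bytes), so masking the low control/type bits off the parent descriptor
 * recovers the table's base address. */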

#define INVALID_DESCRIPTOR 0xDEAD0000
#define ISINVALID(x) ((((ULONG)x) & 3) == 0)
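
/* The low two bits of a descriptor hold its type; 0 marks it invalid here,
 * so 0xDEAD0000 reads as invalid to the table walker while staying easy to
 * spot in memory dumps. */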

static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode);

static void map_pagetable(struct KernelBase *kb, void *addr, ULONG size)
{
    /* 68040+ MMU tables should be serialized */
    map_region2(kb, addr, NULL, size, FALSE, FALSE, FALSE, CM_SERIALIZED);
}

/* Allocate an MMU descriptor block; it must be (1 << bits) * sizeof(ULONG)
 * byte aligned */
static ULONG alloc_descriptor(struct KernelBase *kb, UBYTE mmutype, UBYTE bits, UBYTE level)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG *desc, dout;
    ULONG size = sizeof(ULONG) * (1 << bits);
    ULONG ps = 1 << PAGE_SIZE;
    UWORD i;
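
    /* Step forward in 0x100-byte units (the smallest table size: 64
     * entries * 4 bytes) until page_ptr is aligned to the requested size */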
    while (pd->page_free >= size && (((ULONG)pd->page_ptr) & (size - 1))) {
        pd->page_ptr += 0x100;
        pd->page_free -= 0x100;
    }
    while (pd->page_free < size) {
        /* allocate in aligned blocks of PAGE_SIZE */
        UBYTE *mem, *newmem, *pagemem;
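
        /* Classic aligned-allocation trick: AllocMem() twice the needed
         * size to find a region, FreeMem() it, then immediately AllocAbs()
         * the page-aligned portion. Forbid() prevents another task from
         * grabbing the memory between the free and the re-allocation. */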
        mem = AllocMem(2 * ps, MEMF_PUBLIC);
        if (!mem)
            return 0;
        Forbid();
        FreeMem(mem, 2 * ps);
        newmem = (UBYTE*)((((ULONG)mem) + ps - 1) & ~(ps - 1));
        pagemem = AllocAbs(ps, newmem);
        Permit();
        if (!pagemem)
            return 0;
        pd->page_ptr = pagemem;
        pd->page_free = ps;
        // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
        if (level > 0 && mmutype >= MMU040)
            map_pagetable(kb, pagemem, ps);
    }
    desc = (ULONG*)pd->page_ptr;
    for (i = 0; i < (1 << bits); i++)
        desc[i] = INVALID_DESCRIPTOR;
    dout = (ULONG)desc;
    if (mmutype == MMU030)
        dout |= 2; /* Valid 4 byte descriptor */
    else
        dout |= 3; /* Resident descriptor */
    // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
    pd->page_ptr += size;
    pd->page_free -= size;
    return dout;
}

BOOL init_mmu(struct KernelBase *kb)
{
    UBYTE mmutype = kb->kb_PlatformData->mmu_type;

    if (!mmutype)
        return FALSE;
    kb->kb_PlatformData->MMU_Level_A = (ULONG*)(alloc_descriptor(kb, mmutype, LEVELA_SIZE, 0) & ~3);
    if (!kb->kb_PlatformData->MMU_Level_A) {
        kb->kb_PlatformData->mmu_type = 0;
        return FALSE;
    }
    if (mmutype >= MMU040)
        map_pagetable(kb, kb->kb_PlatformData->MMU_Level_A, 1 << PAGE_SIZE);
    return TRUE;
}
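
/*
 * Typical bring-up order (an illustrative sketch only; the real call sites
 * live in the platform startup code, and the map_region() arguments below
 * are made-up placeholders):
 *
 *     if (init_mmu(kb)) {
 *         map_region(kb, addr, NULL, size, FALSE, FALSE, FALSE, CM_SERIALIZED);
 *         enable_mmu(kb);
 *     }
 *
 * The four routines below run their core sequence in supervisor mode: they
 * fetch ExecBase from address 4, call exec.library's Supervisor() (LVO
 * -0x1e) with the code address in a5, and return with rte.
 */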

static void enable_mmu030(ULONG *levela)
{
    asm volatile (
        ".chip 68030\n"
        "move.l %0,%%d0\n"
        "move.l 4.w,%%a6\n"
        "lea .esuper030(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".esuper030:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        "subq.l #8,%%a7\n"
        /* Disable MMU, setup root pointers,
         * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
        "move.l #0x00c07760,%%d1\n"
        "move.l %%d1,%%a7@\n"
        "pmove %%a7@,%%tc\n"
        /* Set bus error and address error exception vectors */
        "movec %%vbr,%%a5\n"
        "move.l #addrerror030,%%a5@(12)\n"
        "move.l #buserror030,%%a5@(8)\n"
        /* Configure CRP: valid 4 byte descriptor, other features disabled */
        "move.l #0x80000002,%%a7@\n"
        /* First level descriptor pointer */
        "move.l %%d0,%%a7@(4)\n"
        /* Set CRP */
        "pmove %%a7@,%%crp\n"
        /* Set MMU enabled bit */
        "bset #31,%%d1\n"
        "move.l %%d1,%%a7@\n"
        /* MMU on! */
        "pmove %%a7@,%%tc\n"
        /* Clear transparent translation */
        "clr.l %%a7@\n"
        "pmove %%a7@,%%tt0\n"
        "pmove %%a7@,%%tt1\n"
        "addq.l #8,%%a7\n"
        "rte\n"
        "0:\n"
        : : "m" (levela) : "d0", "d1", "a5", "a6");
}

static void disable_mmu030(void)
{
    asm volatile (
        ".chip 68030\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper030(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper030:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "subq.l #4,%%a7\n"
        "clr.l %%a7@\n"
        "pmove %%a7@,%%tc\n"
        "addq.l #4,%%a7\n"
        "rte\n"
        "0:\n"
        : : : "d0", "d1", "a5", "a6");
}

static void enable_mmu040(ULONG *levela, UBYTE cpu060, UBYTE *zeropagedescriptor)
{
    asm volatile (
        ".chip 68060\n"
        "move.l %0,%%d0\n"
        "move.b %1,%%d1\n"
        "move.l %2,%%a1\n"
        "move.l 4.w,%%a6\n"
        "lea .esuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".esuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Save the zeropagedescriptor pointer in vector slot 253 */
        "movec %%vbr,%%a5\n"
        "move.l %%a1,253*4(%%a5)\n"
        /* Select 68040 or 68060 bus/address error handlers */
        "lea buserror040,%%a6\n"
        "lea addrerror040,%%a0\n"
        "tst.b %%d1\n"
        "beq.s .cpu040\n"
        "lea buserror060,%%a6\n"
        "lea addrerror060,%%a0\n"
        ".cpu040:\n"
        /* Install bus error (offset 8) and address error (offset 12) vectors */
        "move.l %%a6,%%a5@(8)\n"
        "move.l %%a0,%%a5@(12)\n"
        "moveq #0,%%d1\n"
        /* Disable MMU, setup root pointers */
        "movec %%d1,%%tc\n"
        "movec %%d0,%%urp\n"
        "movec %%d0,%%srp\n"
        /* Flush data caches and ATC */
        "cpusha %%dc\n"
        "cinva %%dc\n"
        "pflusha\n"
        /* Enable MMU, 4K page size */
        "move.l #0x00008000,%%d0\n"
        "movec %%d0,%%tc\n"
        /* Disable transparent translation */
        "movec %%d1,%%itt0\n"
        "movec %%d1,%%itt1\n"
        "movec %%d1,%%dtt0\n"
        "movec %%d1,%%dtt1\n"
        "rte\n"
        "0:\n"
        : : "m" (levela), "m" (cpu060), "m" (zeropagedescriptor)
        : "d0", "d1", "a0", "a1", "a5", "a6");
}

static void disable_mmu040(void)
{
    asm volatile (
        ".chip 68060\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "moveq #0,%%d0\n"
        "movec %%d0,%%tc\n"
        "pflusha\n"
        "rte\n"
        "0:\n"
        : : : "d0", "d1", "a5", "a6");
}

void enable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;
    if (kb->kb_PlatformData->mmu_type == MMU030)
        enable_mmu030(kb->kb_PlatformData->MMU_Level_A);
    else
        enable_mmu040(kb->kb_PlatformData->MMU_Level_A, kb->kb_PlatformData->mmu_type == MMU060, kb->kb_PlatformData->zeropagedescriptor);
}

void disable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;
    if (kb->kb_PlatformData->mmu_type == MMU030)
        disable_mmu030();
    else
        disable_mmu040();
}

#if DEBUG
/* Walk all three table levels and return the page (level C) descriptor */
static ULONG getdesc(struct KernelBase *kb, ULONG addr)
{
    ULONG desc;

    desc = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELB(desc, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELC(desc, addr);
    return desc;
}
#endif

void debug_mmu(struct KernelBase *kb)
{
#if DEBUG
    UBYTE mmutype;
    ULONG i;
    ULONG startaddr;
    ULONG odesc;
    ULONG totalpages;
    ULONG pagemask = (1 << PAGE_SIZE) - 1;

    mmutype = kb->kb_PlatformData->mmu_type;
    if (!mmutype || kb->kb_PlatformData->MMU_Level_A == NULL)
        return;
    bug("MMU dump start. Root = %p\n", kb->kb_PlatformData->MMU_Level_A);
    totalpages = 1 << (32 - PAGE_SIZE);
    startaddr = 0;
    odesc = getdesc(kb, startaddr);
    for (i = 0; i <= totalpages; i++) {
        ULONG addr = i << PAGE_SIZE;
        ULONG desc = 0;
        if (i < totalpages)
            desc = getdesc(kb, addr);
        /* Print one line per run of pages with identical attributes */
        if ((desc & pagemask) != (odesc & pagemask) || i == totalpages) {
            UBYTE cm, sp;
            if (mmutype == MMU030) {
                cm = (odesc >> 6) & 1;
                sp = 0;
            } else {
                cm = (odesc >> 5) & 3;
                sp = (odesc >> 7) & 1;
            }
            bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
                startaddr, addr - 1, odesc & ~((1 << PAGE_SIZE) - 1),
                (odesc & 4) ? 1 : 0, sp, cm, odesc);
            startaddr = addr;
            odesc = desc;
        }
    }
    bug("MMU dump end\n");
#endif
}

static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG desca, descb, descc, pagedescriptor;
    ULONG page_size = 1 << PAGE_SIZE;
    ULONG page_mask = page_size - 1;
    UBYTE mmutype;

    mmutype = pd->mmu_type;
    if (!mmutype)
        return FALSE;
    if (kb->kb_PlatformData->MMU_Level_A == NULL)
        return FALSE;

    if ((size & page_mask) || (((ULONG)addr) & page_mask) || (((ULONG)physaddr) & page_mask)) {
        D(bug("unaligned MMU page request! %p (%p) %08x\n", addr, physaddr, size));
        return FALSE;
    }
    /* A NULL physaddr means a 1:1 (identity) mapping */
    if (physaddr == NULL)
        physaddr = addr;

    while (size) {
        /* Walk the table, allocating level B and level C tables on demand */
        desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
        if (ISINVALID(desca))
            desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr) = alloc_descriptor(kb, mmutype, LEVELB_SIZE, 1);
        if (ISINVALID(desca))
            return FALSE;
        descb = LEVELB(desca, addr);
        if (ISINVALID(descb))
            descb = LEVELB(desca, addr) = alloc_descriptor(kb, mmutype, LEVELC_SIZE, 2);
        if (ISINVALID(descb))
            return FALSE;
        descc = LEVELC(descb, addr);
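
        /* Note where the level C descriptor for page zero lives. The stored
         * pointer addresses the descriptor's least significant byte (offset
         * +3, m68k being big-endian), which carries the type, write-protect
         * and cache-mode bits, so it can be patched with a single byte write. */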
        if (addr == 0 && pd->zeropagedescriptor == NULL) {
            /* special case zero page handling */
            pd->zeropagedescriptor = (UBYTE*)(&LEVELC(descb, addr)) + 3;
        }

        if (invalid) {
            pagedescriptor = INVALID_DESCRIPTOR;
            /* The zero page is never truly unmapped: it stays resident,
             * but write-protected and noncachable */
            if (addr == 0 && size == page_size) {
                pagedescriptor = ((ULONG)physaddr) & ~page_mask;
                if (mmutype == MMU030) {
                    pagedescriptor |= 4; // write-protected
                    pagedescriptor |= 1 << 6; // cache inhibit
                } else {
                    pagedescriptor |= 4; // write-protected
                    pagedescriptor |= CM_SERIALIZED << 5;
                }
            }
        } else {
            BOOL wasinvalid = ISINVALID(descc);
            pagedescriptor = ((ULONG)physaddr) & ~page_mask;
            if (mmutype == MMU030) {
                pagedescriptor |= 1; // page descriptor
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                /* 68030 can only enable or disable caching */
                if (cachemode >= CM_SERIALIZED || (!wasinvalid && (descc & (1 << 6))))
                    pagedescriptor |= 1 << 6;
            } else {
                pagedescriptor |= 3; // resident page
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                if (supervisor || (!wasinvalid && (descc & (1 << 7))))
                    pagedescriptor |= 1 << 7; // supervisor protected
                // do not override non-cached
                if (wasinvalid || cachemode > ((descc >> 5) & 3))
                    pagedescriptor |= cachemode << 5;
                else
                    pagedescriptor |= ((descc >> 5) & 3) << 5;
                if (addr != 0 || size != page_size)
                    pagedescriptor |= 1 << 10; // global if not zero page
            }
        }

        LEVELC(descb, addr) = pagedescriptor;
        size -= page_size;
        addr += page_size;
        physaddr += page_size;
    }

    return TRUE;
}

BOOL map_region(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    D(bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d, cm=%d)\n",
        addr, physaddr, size, invalid ? 1 : 0, writeprotect ? 1 : 0, supervisor ? 1 : 0, cachemode));
    return map_region2(kb, addr, physaddr, size, invalid, writeprotect, supervisor, cachemode);
}

BOOL unmap_region(struct KernelBase *kb, void *addr, ULONG size)
{
    D(bug("unmap_region(%p, %08x)\n", addr, size));
    return map_region2(kb, addr, NULL, size, TRUE, FALSE, FALSE, 0);
}