/* arch/m68k-all/kernel/mmu.c - m68k MMU support (AROS) */
#include <aros/debug.h>
#include <proto/exec.h>

#include "kernel_base.h"
#include "kernel_intern.h"

/* 68030 (68851), 68040 and 68060 are supported. The 68030 (68851) is
 * configured like a 68040; no 68030-specific features are used because
 * they are not worth the extra complexity. */

/* Three table levels plus page offset: 7 + 7 + 6 + 12 = 32 address bits */
#define LEVELA_SIZE 7
#define LEVELB_SIZE 7
#define LEVELC_SIZE 6
#define PAGE_SIZE 12 // 1 << 12 = 4096 byte pages

/* Macros that hopefully make the MMU magic a bit easier to understand. */

#define LEVELA_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE                            ))) & ((1 << LEVELA_SIZE) - 1))
#define LEVELB_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE              ))) & ((1 << LEVELB_SIZE) - 1))
#define LEVELC_VAL(x) ((((ULONG)(x)) >> (32 - (LEVELA_SIZE + LEVELB_SIZE + LEVELC_SIZE))) & ((1 << LEVELC_SIZE) - 1))

#define LEVELA(root, x) (root[LEVELA_VAL(x)])
#define LEVELB(a, x) (((ULONG*)(((ULONG)a) & ~((1 << (LEVELB_SIZE + 2)) - 1)))[LEVELB_VAL(x)])
#define LEVELC(b, x) (((ULONG*)(((ULONG)b) & ~((1 << (LEVELC_SIZE + 2)) - 1)))[LEVELC_VAL(x)])
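
/* Worked example (illustrative address): for x = 0x12345678,
 *   LEVELA_VAL(x) = (x >> 25) & 0x7f = 0x09  -- root table index (top 7 bits)
 *   LEVELB_VAL(x) = (x >> 18) & 0x7f = 0x0d  -- level B index (next 7 bits)
 *   LEVELC_VAL(x) = (x >> 12) & 0x3f = 0x05  -- level C index (next 6 bits)
 * and the low 12 bits (0x678) are the offset within the 4K page. */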

#define INVALID_DESCRIPTOR 0xDEAD0000
#define ISINVALID(x) ((((ULONG)x) & 3) == 0)
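
/* The descriptor type lives in the low two bits: this code uses 00 for
 * invalid, 01 for a 68030 page descriptor, 10 for a 68030 valid 4-byte
 * table descriptor and 11 for a 68040/060 resident descriptor. ISINVALID()
 * tests only those bits, so the 0xDEAD0000 marker still reads as invalid. */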

static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode);

static void map_pagetable(struct KernelBase *kb, void *addr, ULONG size)
{
    /* 68040+ MMU tables should be serialized */
    map_region2(kb, addr, NULL, size, FALSE, FALSE, FALSE, CM_SERIALIZED);
}

/* Allocate an MMU descriptor page; it needs to be (1 << bits) * sizeof(ULONG) aligned */
static ULONG alloc_descriptor(struct KernelBase *kb, UBYTE mmutype, UBYTE bits, UBYTE level)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG *desc, dout;
    ULONG size = sizeof(ULONG) * (1 << bits);
    ULONG ps = 1 << PAGE_SIZE;
    UWORD i;
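
    /* A descriptor table must be naturally aligned to its size. First step
     * forward inside the current page until the alignment fits (all table
     * sizes here are multiples of 0x100 bytes); if the page runs out, grab
     * a fresh page-aligned chunk: AllocMem() twice the page size, FreeMem()
     * it, then immediately AllocAbs() the aligned portion. The Forbid()/
     * Permit() pair keeps other tasks from snatching the freed block in
     * between. */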
    while (pd->page_free >= size && (((ULONG)pd->page_ptr) & (size - 1))) {
        pd->page_ptr += 0x100;
        pd->page_free -= 0x100;
    }
    while (pd->page_free < size) {
        /* allocate in aligned blocks of PAGE_SIZE */
        UBYTE *mem, *newmem, *pagemem;

        mem = AllocMem(2 * ps, MEMF_PUBLIC);
        if (!mem)
            return 0;
        Forbid();
        FreeMem(mem, 2 * ps);
        newmem = (UBYTE*)((((ULONG)mem) + ps - 1) & ~(ps - 1));
        pagemem = AllocAbs(ps, newmem);
        Permit();
        if (!pagemem)
            return 0;
        pd->page_ptr = pagemem;
        pd->page_free = ps;
        // bug("New chunk %p-%p\n", pagemem, pagemem + ps - 1);
        if (level > 0 && mmutype >= MMU040)
            map_pagetable(kb, pagemem, ps);
    }
    desc = (ULONG*)pd->page_ptr;
    for (i = 0; i < (1 << bits); i++)
        desc[i] = INVALID_DESCRIPTOR;
    dout = (ULONG)desc;
    if (mmutype == MMU030)
        dout |= 2; /* Valid 4 byte descriptor */
    else
        dout |= 3; /* Resident descriptor */
    // bug("Level%c %p-%p: %08x\n", level + 'A', pd->page_ptr, pd->page_ptr + size - 1, dout);
    pd->page_ptr += size;
    pd->page_free -= size;
    return dout;
}

BOOL init_mmu(struct KernelBase *kb)
{
    UBYTE mmutype = kb->kb_PlatformData->mmu_type;

    if (!mmutype)
        return FALSE;
    kb->kb_PlatformData->MMU_Level_A = (ULONG*)(alloc_descriptor(kb, mmutype, LEVELA_SIZE, 0) & ~3);
    if (!kb->kb_PlatformData->MMU_Level_A) {
        kb->kb_PlatformData->mmu_type = 0;
        return FALSE;
    }
    if (mmutype >= MMU040)
        map_pagetable(kb, kb->kb_PlatformData->MMU_Level_A, 1 << PAGE_SIZE);
    return TRUE;
}
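
/* Rough boot-time flow (a sketch only; the real call sites live elsewhere
 * in the m68k kernel startup code):
 *
 *     if (init_mmu(kb)) {
 *         // set up mappings while translation is still off, e.g.
 *         // map_region(kb, addr, NULL, size, FALSE, FALSE, FALSE, cachemode);
 *         enable_mmu(kb);
 *     }
 */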

static void enable_mmu030(ULONG *levela)
{
    asm volatile (
        ".chip 68030\n"
        "move.l %0,%%d0\n"
106 "move.l 4.w,%%a6\n"
107 "lea .esuper030(%%pc),%%a5\n"
108 "jsr -0x1e(%%a6)\n"
109 "bra.s 0f\n"
110 ".esuper030:\n"
111 /* Do not interrupt us */
112 "or #0x0700,%%sr\n"
113 "subq.l #8,%%a7\n"
114 /* Disable MMU, setup root pointers,
115 * uses 68040 MMU descriptor levels (7/7/6, 4K page size) */
116 "move.l #0x00c07760,%%d1\n"
117 "move.l %%d1,%%a7@\n"
118 "pmove %%a7@,%%tc\n"
119 /* Set bus error exception vector */
120 "movec %%vbr,%%a5\n"
121 "move.l #buserror030,%%a5@(8)\n"
122 /* Configure CRP. Valid 4 byte descriptor, other features disabled. */
123 "move.l #0x80000002,%%a7@\n"
124 /* First level descriptor pointer */
125 "move.l %%d0,%%a7@(4)\n"
126 /* Set CRP */
127 "pmove %%a7@,%%crp\n"
128 /* Set MMU enabled bit */
129 "bset #31,%%d1\n"
130 "move.l %%d1,%%a7@\n"
131 /* MMU on! */
132 "pmove %%a7@,%%tc\n"
133 /* Clear transparent translation */
134 "clr.l %%a7@\n"
135 "pmove %%a7@,%%tt0\n"
136 "pmove %%a7@,%%tt1\n"
137 "addq.l #8,%%a7\n"
138 "rte\n"
139 "0:\n"
140 : : "m" (levela) : "d0", "d1", "a5", "a6");

static void disable_mmu030(void)
{
    asm volatile (
        ".chip 68030\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper030(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper030:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "subq.l #4,%%a7\n"
        "clr.l %%a7@\n"
        "pmove %%a7@,%%tc\n"
        "addq.l #4,%%a7\n"
        "rte\n"
        "0:\n"
        : : : "d0", "d1", "a5", "a6");
}

static void enable_mmu040(ULONG *levela, UBYTE cpu060, UBYTE *zeropagedescriptor)
{
    asm volatile (
        ".chip 68060\n"
        "move.l %0,%%d0\n"
        "move.b %1,%%d1\n"
        "move.l %2,%%a1\n"
        "move.l 4.w,%%a6\n"
        "lea .esuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".esuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        "movec %%vbr,%%a5\n"
        "move.l %%a1,253*4(%%a5)\n"
178 "lea buserror040,%%a6\n"
179 "tst.b %%d1\n"
180 "beq.s .cpu040\n"
181 "lea buserror060,%%a6\n"
182 ".cpu040:\n"
183 "move.l %%a6,%%a5@(8)\n"
184 "moveq #0,%%d1\n"
185 /* Disable MMU, setup root pointers */
186 "movec %%d1,%%tc\n"
187 "movec %%d0,%%urp\n"
188 "movec %%d0,%%srp\n"
189 /* Flush data caches and ATC */
190 "cpusha %%dc\n"
191 "cinva %%dc\n"
192 "pflusha\n"
193 /* Enable MMU, 4K page size */
194 "move.l #0x00008000,%%d0\n"
195 "movec %%d0,%%tc\n"
196 /* Disable transparent translation */
197 "movec %%d1,%%itt0\n"
198 "movec %%d1,%%itt1\n"
199 "movec %%d1,%%dtt0\n"
200 "movec %%d1,%%dtt1\n"
201 "rte\n"
202 "0:\n"
203 : : "m" (levela), "m" (cpu060), "m" (zeropagedescriptor) : "d0", "d1", "a1", "a5", "a6");

static void disable_mmu040(void)
{
    asm volatile (
        ".chip 68060\n"
        "move.l 4.w,%%a6\n"
        "lea .dsuper040(%%pc),%%a5\n"
        "jsr -0x1e(%%a6)\n"
        "bra.s 0f\n"
        ".dsuper040:\n"
        /* Do not interrupt us */
        "or #0x0700,%%sr\n"
        /* Disable MMU */
        "moveq #0,%%d0\n"
        "movec %%d0,%%tc\n"
        "pflusha\n"
        "rte\n"
        "0:\n"
        : : : "d0", "d1", "a5", "a6");
}

void enable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;
    if (kb->kb_PlatformData->mmu_type == MMU030)
        enable_mmu030(kb->kb_PlatformData->MMU_Level_A);
    else
        enable_mmu040(kb->kb_PlatformData->MMU_Level_A, kb->kb_PlatformData->mmu_type == MMU060, kb->kb_PlatformData->zeropagedescriptor);
}

void disable_mmu(struct KernelBase *kb)
{
    if (!kb->kb_PlatformData->mmu_type)
        return;
    if (kb->kb_PlatformData->mmu_type == MMU030)
        disable_mmu030();
    else
        disable_mmu040();
}

static ULONG getdesc(struct KernelBase *kb, ULONG addr)
{
    ULONG desc;

    desc = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELB(desc, addr);
    if (ISINVALID(desc))
        return desc;
    desc = LEVELC(desc, addr);
    return desc;
}

void debug_mmu(struct KernelBase *kb)
{
    UBYTE mmutype;
    ULONG i;
    ULONG startaddr;
    ULONG odesc;
    ULONG totalpages;
    ULONG pagemask = (1 << PAGE_SIZE) - 1;

    mmutype = kb->kb_PlatformData->mmu_type;
    if (!mmutype || kb->kb_PlatformData->MMU_Level_A == NULL)
        return;
    bug("MMU dump start. Root = %p\n", kb->kb_PlatformData->MMU_Level_A);
    totalpages = 1 << (32 - PAGE_SIZE);
    startaddr = 0;
    odesc = getdesc(kb, startaddr);
    for (i = 0; i <= totalpages; i++) {
        ULONG addr = i << PAGE_SIZE;
        ULONG desc = 0;
        if (i < totalpages)
            desc = getdesc(kb, addr);
        if ((desc & pagemask) != (odesc & pagemask) || i == totalpages) {
            UBYTE cm, sp;
            if (mmutype == MMU030) {
                cm = (odesc >> 6) & 1;
                sp = 0;
            } else {
                cm = (odesc >> 5) & 3;
                sp = (odesc >> 7) & 1;
            }
            bug("%p - %p: %p WP=%d S=%d CM=%d (%08x)\n",
                startaddr, addr - 1, odesc & ~((1 << PAGE_SIZE) - 1),
                (odesc & 4) ? 1 : 0, sp, cm, odesc);
            startaddr = addr;
            odesc = desc;
        }
    }
    bug("MMU dump end\n");
}

static BOOL map_region2(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    struct PlatformData *pd = kb->kb_PlatformData;
    ULONG desca, descb, descc, pagedescriptor;
    ULONG page_size = 1 << PAGE_SIZE;
    ULONG page_mask = page_size - 1;
    UBYTE mmutype;

    mmutype = pd->mmu_type;
    if (!mmutype)
        return FALSE;
    if (kb->kb_PlatformData->MMU_Level_A == NULL)
        return FALSE;

    if ((size & page_mask) || (((ULONG)addr) & page_mask) || (((ULONG)physaddr) & page_mask)) {
        bug("unaligned MMU page request! %p (%p) %08x\n", addr, physaddr, size);
        return FALSE;
    }
    if (physaddr == NULL)
        physaddr = addr;

    while (size) {
        desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr);
        if (ISINVALID(desca))
            desca = LEVELA(kb->kb_PlatformData->MMU_Level_A, addr) = alloc_descriptor(kb, mmutype, LEVELB_SIZE, 1);
        if (ISINVALID(desca))
            return FALSE;
        descb = LEVELB(desca, addr);
        if (ISINVALID(descb))
            descb = LEVELB(desca, addr) = alloc_descriptor(kb, mmutype, LEVELC_SIZE, 2);
        if (ISINVALID(descb))
            return FALSE;
        descc = LEVELC(descb, addr);

        if (invalid) {
            pagedescriptor = INVALID_DESCRIPTOR;
            if (addr == 0 && size == page_size) {
                /* special case zero page handling */
                pd->zeropagedescriptor = (UBYTE*)(&LEVELC(descb, addr)) + 3;
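                /* The +3 points at the last (least significant, big-endian)
                 * byte of page 0's level C descriptor, the byte holding the
                 * descriptor type and WP bits; presumably kept so the bus
                 * error handler can repatch page 0 with a single byte write. */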
                pagedescriptor = ((ULONG)physaddr) & ~page_mask;
                if (mmutype == MMU030) {
                    pagedescriptor |= 4; // write-protected
                    pagedescriptor |= 1 << 6; // cache inhibit
                } else {
                    pagedescriptor |= 4; // write-protected
                    pagedescriptor |= CM_SERIALIZED << 5;
                }
            }
        } else {
            BOOL wasinvalid = ISINVALID(descc);
            pagedescriptor = ((ULONG)physaddr) & ~page_mask;
            if (mmutype == MMU030) {
                pagedescriptor |= 1; // page descriptor
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                /* 68030 can only enable or disable caching */
                if (cachemode >= CM_SERIALIZED || (!wasinvalid && (descc & (1 << 6))))
                    pagedescriptor |= 1 << 6;
            } else {
                pagedescriptor |= 3; // resident page
                if (writeprotect || (!wasinvalid && (descc & 4)))
                    pagedescriptor |= 4; // write-protected
                if (supervisor || (!wasinvalid && (descc & (1 << 7))))
                    pagedescriptor |= 1 << 7;
                // do not override non-cached
                if (wasinvalid || cachemode > ((descc >> 5) & 3))
                    pagedescriptor |= cachemode << 5;
                else
                    pagedescriptor |= ((descc >> 5) & 3) << 5;
                if (addr != 0 || size != page_size)
                    pagedescriptor |= 1 << 10; // global if not zero page
            }
        }

        LEVELC(descb, addr) = pagedescriptor;
        size -= page_size;
        addr += page_size;
        physaddr += page_size;
    }

    return TRUE;
}

BOOL map_region(struct KernelBase *kb, void *addr, void *physaddr, ULONG size, BOOL invalid, BOOL writeprotect, BOOL supervisor, UBYTE cachemode)
{
    bug("map_region(%p, %p, %08x, in=%d, wp=%d, s=%d, cm=%d)\n",
        addr, physaddr, size, invalid ? 1 : 0, writeprotect ? 1 : 0, supervisor ? 1 : 0, cachemode);
    return map_region2(kb, addr, physaddr, size, invalid, writeprotect, supervisor, cachemode);
}

BOOL unmap_region(struct KernelBase *kb, void *addr, ULONG size)
{
    bug("unmap_region(%p, %08x)\n", addr, size);
    return map_region2(kb, addr, NULL, size, TRUE, FALSE, FALSE, 0);
}
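
/* Usage sketch (illustrative address and size only): map one page of custom
 * chip I/O space write-enabled, supervisor-only and serialized, then
 * invalidate it again:
 *
 *     map_region(kb, (void*)0x00dff000, NULL, 1 << PAGE_SIZE, FALSE, FALSE, TRUE, CM_SERIALIZED);
 *     unmap_region(kb, (void*)0x00dff000, 1 << PAGE_SIZE);
 */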