1 #include <aros/debug.h>
2 #include <asm/amcc440.h>
4 #include <aros/kernel.h>
6 #include "kernel_intern.h"
13 /* Alloc TLB in the bitmap. Returns -1 if the allocation cannot be done */
14 static int alloc_tlb(struct tlb_info
*info
)
16 /* It should be done in locked state only! */
18 int bit
= __builtin_clz(info
->bitmap
[0]);
21 bit
+= __builtin_clz(info
->bitmap
[1]);
25 info
->bitmap
[0] &= ~(0x80000000 >> bit
);
33 info
->bitmap
[1] &= ~(0x80000000 >> (bit
-32));
39 static void free_tlb(struct tlb_info
*info
, int entry
)
41 if (entry
>=0 && entry
< 32)
43 asm volatile("tlbwe %0,%1,0;" ::"r"(0), "r"(entry
));
44 if (info
->bitmap
[0] & (0x80000000 >> entry
))
46 D(bug("[KRN] Freeing already free TLB!!!\n"));
50 info
->bitmap
[0] |= (0x80000000 >> entry
);
56 asm volatile("tlbwe %0,%1,0;" ::"r"(0), "r"(entry
));
58 if (info
->bitmap
[1] & (0x80000000 >> entry
))
60 D(bug("[KRN] Freeing already free TLB!!!\n"));
64 info
->bitmap
[1] |= (0x80000000 >> entry
);
70 D(bug("[KRN] Wrong TLB\n"));
/*
 * Page sizes supported by the PPC440 MMU, biggest first so map_region()
 * can greedily pick the largest entry that fits.
 * 'code' is the size encoding OR-ed into TLB word 0; 'mask' is size - 1.
 * NOTE(review): field types reconstructed from usage in map_region() —
 * confirm against the original header/definition.
 */
static struct mmu_page_size
{
    uint8_t  code;  /* TLB word 0 size-field encoding */
    uint32_t mask;  /* page size minus one (alignment/length mask) */
} allowable_pages[] = {
    { 0x90, 0x0fffffff }, /* 256MB */
    { 0x70, 0x00ffffff }, /* 16MB */
    { 0x50, 0x000fffff }, /* 1MB */
    { 0x40, 0x0003ffff }, /* 256KB */
    { 0x30, 0x0000ffff }, /* 64KB */
    { 0x20, 0x00003fff }, /* 16KB */
    { 0x10, 0x00000fff }, /* 4KB */
    { 0x00, 0x000003ff }, /* 1KB */
    { 0xff, 0xffffffff }, /* END MARKER */
};
90 void map_region(struct tlb_info
*info
, uint8_t extra
, uintptr_t physbase
, uintptr_t virtbase
, uintptr_t length
, uint32_t prot
)
92 long tlb_temp
= info
->free
;
94 D(bug("[KRN] map_region(%08x, %08x, %08x, %04x): ", physbase
, virtbase
, length
, prot
));
96 /* While there is still something to map */
102 /* Check all available page sizes and try to match the best (the biggest) usable TLB entry */
103 while (allowable_pages
[i
].code
!= 0xff)
105 if ((length
> allowable_pages
[i
].mask
) && !(physbase
& allowable_pages
[i
].mask
) && !(virtbase
& allowable_pages
[i
].mask
))
110 if (allowable_pages
[i
].code
== 0xff)
112 D(bug("\n[KRN] map_region failed\n"));
117 tlb
= alloc_tlb(info
);
120 D(bug("\n[KRN] map_region: No more free TLB entries\n"));
124 //D(bug("\n[KRN] TLB%02x: %08x - %08x : %08x - %08x: ", tlb,
125 //physbase, physbase + allowable_pages[i].mask,
126 //virtbase, virtbase + allowable_pages[i].mask));
128 /* Do really write to the tlb */
129 asm volatile("tlbwe %0,%3,0; tlbwe %1,%3,1; tlbwe %2,%3,2"
130 ::"r"(virtbase
| allowable_pages
[i
].code
| TLB_V
), "r"(physbase
| extra
), "r"(prot
), "r"(tlb
));
131 //D(bug("%08x %08x %08x ", virtbase | allowable_pages[i].code | 0x200, physbase, prot));
133 length
-= allowable_pages
[i
].mask
+ 1;
134 physbase
+= allowable_pages
[i
].mask
+ 1;
135 virtbase
+= allowable_pages
[i
].mask
+ 1;
138 tlb_temp
-= info
->free
;
139 D(bug("%2d TLB%s\n", tlb_temp
, tlb_temp
> 1 ? "s":""));
142 static void free_remaining(struct tlb_info
*info
)
149 tlb
= alloc_tlb(info
);
150 D(bug("[KRN] TLB%02x: Clear\n", tlb
));
151 free_remaining(info
);
/* Snapshot of one hardware TLB entry used by the dump helpers */
struct tlb
{
    int tlb;          /* entry index (0..63) */
    uint32_t reg[3];  /* TLB words 0, 1 and 2 as read by tlbre */
};
158 static void tlb_dump_entry(const struct tlb
*tlb
)
160 uint32_t tlb_0
, tlb_1
, tlb_2
;
161 uint32_t phys
, virt
, size
;
167 if (!(tlb_0
& TLB_V
))
170 size
= 1024 << (2 * ((tlb_0
>> 4) & 0xf));
171 virt
= tlb_0
& ~((1 << 10)-1);
172 phys
= tlb_1
& ~((1 << 10)-1);
174 D(bug("[KRN] TLB%02x: ",tlb
->tlb
));
175 D(bug("%c%c%c%c%c%c%c%c%c%c%c ",
176 (tlb_2
& TLB_W
) ? 'W' : '-',
177 (tlb_2
& TLB_I
) ? 'I' : '-',
178 (tlb_2
& TLB_M
) ? 'M' : '-',
179 (tlb_2
& TLB_G
) ? 'G' : '-',
180 (tlb_2
& TLB_E
) ? 'E' : '-',
181 (tlb_2
& TLB_UR
) ? 'r' : '-',
182 (tlb_2
& TLB_UW
) ? 'w' : '-',
183 (tlb_2
& TLB_UX
) ? 'x' : '-',
184 (tlb_2
& TLB_SR
) ? 'r' : '-',
185 (tlb_2
& TLB_SW
) ? 'w' : '-',
186 (tlb_2
& TLB_SX
) ? 'x' : '-'));
187 D(bug("%08x - %08x : %08x: 0:%08x 1:%08x 2:%08x\n",
188 virt
, virt
+ size
- 1, phys
,
189 tlb_0
, tlb_1
, tlb_2
));
192 static int tlbcmp(const void *a
, const void *b
)
194 const struct tlb
*ta
=a
, *tb
= b
;
196 if (ta
->reg
[0] < tb
->reg
[0])
198 if (ta
->reg
[0] == tb
->reg
[0])
205 static void tlb_dump(void)
209 D(static int some_bss
);
210 D(static int some_data
=0xdeadcafe);
212 D(bug("[KRN] Executing at %p, stack at %p, bss at %p, data at %p\n", __builtin_return_address(0), __builtin_frame_address(0), &some_bss
, &some_data
));
213 for (tlb
= 0; tlb
< 64; tlb
++) {
214 asm volatile("tlbre %0,%3,0; tlbre %1,%3,1; tlbre %2,%3,2"
215 :"=r" (tlbs
[tlb
].reg
[0]),
216 "=r" (tlbs
[tlb
].reg
[1]),
217 "=r" (tlbs
[tlb
].reg
[2])
222 qsort(tlbs
, 64, sizeof(tlbs
[0]), tlbcmp
);
224 for (tlb
= 0; tlb
< 64; tlb
++)
225 tlb_dump_entry(&tlbs
[tlb
]);
229 void mmu_init(struct TagItem
*tags
)
231 uintptr_t krn_lowest
= krnGetTagData(KRN_KernelLowest
, 0, tags
);
232 uintptr_t krn_highest
= krnGetTagData(KRN_KernelHighest
, 0, tags
);
233 uintptr_t krn_base
= krnGetTagData(KRN_KernelBase
, 0, tags
);
234 struct tlb_info info
= {
235 .bitmap
= { ~0, ~0 },
239 uint32_t pvr
= rdspr(PVR
);
241 D(bug("[KRN] MMU Init\n"));
242 D(bug("[KRN] lowest = %p, base = %p, highest = %p\n", krn_lowest
, krn_base
, krn_highest
));
243 D(bug("[KRN] Kernel size: %dKB code, %dKB data\n", (krn_highest
- krn_base
)/1024, (krn_base
- krn_lowest
)/1024));
248 * In order to reduce the usage of TLB entries, align the kernel regions.
249 * It wastes a tiny bit of RAM but saves a lot of TLB entries.
252 /* 4K granularity for data sections */
253 krn_lowest
&= 0xfffff000;
254 /* 64K granularity for code sections */
255 krn_highest
= (krn_highest
+ 0xffff) & 0xffff0000;
258 * The very first entry has to cover the executable part of kernel,
259 * where exception handlers are located
261 map_region(&info
, 0x0, krn_base
, 0xff000000 + krn_base
, krn_highest
- krn_base
, TLB_SR
| TLB_SX
| TLB_UR
| TLB_UX
);
262 /* Now the data area for kernel. Make it read/write for both user and supervisor. No execution allowed */
263 map_region(&info
, 0x0, krn_lowest
, 0xff000000 + krn_lowest
, krn_base
- krn_lowest
, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
);
264 /* The low memory will be RW assigned to the supervisor mode. No access from usermode! */
265 map_region(&info
, 0x0, 0, 0xff000000, krn_lowest
, TLB_SR
| TLB_SW
);
267 /* The regular RAM, make 1GB of it - amcc440 cannot do more. */
268 map_region(&info
, 0x0, krn_highest
, krn_highest
, 0x40000000 - krn_highest
, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_SX
| TLB_UX
);
270 if (krnIsPPC440(pvr
)) {
271 D(bug("[KRN] MMU: Configure for PPC440\n"));
272 /* map some 440EP peripherials bus */
273 map_region(&info
, 0x0, 0x80000000, 0x80000000, 0x20000000, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_G
| TLB_I
);
274 /* map the PCI bus */
275 map_region(&info
, 0x0, 0xa0000000, 0xa0000000, 0x40000000, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_G
| TLB_I
);
276 /* PCI control registers and onboard devices */
277 map_region(&info
, 0x0, 0xe0000000, 0xe0000000, 0x10000000, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_G
| TLB_I
);
278 } else if (krnIsPPC460(pvr
)) {
279 D(bug("[KRN] MMU: Configure for PPC460\n"));
280 /* map some 460EX peripherials bus */
281 map_region(&info
, 0xc, 0x80000000, 0x80000000, 0x20000000, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_G
| TLB_I
);
282 /* UART control registers and onboard devices */
283 map_region(&info
, 0x4, 0xe0000000, 0xe0000000, 0x10000000, TLB_SR
| TLB_SW
| TLB_UR
| TLB_UW
| TLB_G
| TLB_I
);
285 bug("[KRN] MMU: Cannot configure - unknown PVR model 0x%08x\n", pvr
);
289 free_remaining(&info
);
292 D(bug("[KRN] TLB status: %d used, %d free\n", 64 - info
.free
, info
.free
));
294 /* flush TLB shadow regs */
295 asm volatile("isync;");