[AROS.git] / arch / ppc-sam440 / kernel / mmu.c
#include <asm/amcc440.h>
#include <asm/io.h>
#include <aros/kernel.h>

#include "kernel_intern.h"

static long tlb_bitmap[2] = {0xffffffff, 0xffffffff};
static long tlb_free = 64;
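
/*
 * Note (added for clarity): the PPC440 core has a 64-entry software-managed
 * TLB. A set bit in tlb_bitmap marks the corresponding entry as free, so
 * __builtin_clz() locates the lowest-numbered free entry in a single step.
 */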
/* Allocate a TLB entry from the bitmap. Returns -1 if the allocation cannot be done. */
static int alloc_tlb(void)
{
    /* It should be done in locked state only! */
    int bit = __builtin_clz(tlb_bitmap[0]);

    if (bit == 32)
    {
        /*
         * The first word is exhausted, continue the search in the second
         * one. NB: __builtin_clz(0) is formally undefined, but on PPC it
         * compiles to cntlzw, which returns 32 for a zero input.
         */
        bit += __builtin_clz(tlb_bitmap[1]);
        if (bit == 64)
            return -1;

        tlb_bitmap[1] &= ~(0x80000000 >> (bit - 32));
    }
    else
    {
        tlb_bitmap[0] &= ~(0x80000000 >> bit);
    }

    tlb_free--;
    return bit;
}
/* Return a TLB entry to the bitmap */
static void free_tlb(int entry)
{
    if (entry >= 0 && entry < 32)
    {
        if (tlb_bitmap[0] & (0x80000000 >> entry))
            D(bug("[KRN] Freeing already free TLB!!!\n"));
        else
        {
            tlb_bitmap[0] |= (0x80000000 >> entry);
            tlb_free++;
        }
    }
    else if (entry < 64)
    {
        entry -= 32;
        if (tlb_bitmap[1] & (0x80000000 >> entry))
            D(bug("[KRN] Freeing already free TLB!!!\n"));
        else
        {
            tlb_bitmap[1] |= (0x80000000 >> entry);
            tlb_free++;
        }
    }
    else
        D(bug("[KRN] Wrong TLB\n"));
}
static struct mmu_page_size {
    uint8_t   code;
    uintptr_t mask;
} allowable_pages[] = {
    { 0x90, 0x0fffffff },   /* 256MB */
    { 0x70, 0x00ffffff },   /*  16MB */
    { 0x50, 0x000fffff },   /*   1MB */
    { 0x40, 0x0003ffff },   /* 256KB */
    { 0x30, 0x0000ffff },   /*  64KB */
    { 0x20, 0x00003fff },   /*  16KB */
    { 0x10, 0x00000fff },   /*   4KB */
    { 0x00, 0x000003ff },   /*   1KB */
    { 0xff, 0xffffffff },   /* END MARKER */
};
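
/*
 * Note (added for clarity): `code` is the SIZE field of TLB word 0,
 * pre-shifted into place (SIZE << 4); e.g. 0x90 is SIZE 9, a 256MB page
 * on the 440. `mask` is the page size minus one and serves both as the
 * alignment test and as the mapping increment below.
 */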
void map_region(uintptr_t physbase, uintptr_t virtbase, uintptr_t length, uint32_t prot)
{
    long tlb_temp = tlb_free;

    D(bug("[KRN] map_region(%08x, %08x, %08x, %04x): ", physbase, virtbase, length, prot));

    /* While there is still something left to map... */
    while (length)
    {
        int tlb;
        int i = 0;

        /*
         * Check all available page sizes and pick the best (i.e. largest)
         * usable one: it must not exceed the remaining length, and both
         * the physical and the virtual address must be aligned to it.
         */
        while (allowable_pages[i].code != 0xff)
        {
            if ((length > allowable_pages[i].mask) && !(physbase & allowable_pages[i].mask) && !(virtbase & allowable_pages[i].mask))
                break;
            i++;
        }
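
        /*
         * Example (illustrative): mapping 16MB at a 16MB-aligned address
         * matches the 0x70 row and consumes a single TLB entry, where
         * 4KB pages would have needed 4096 of them.
         */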
        /* Nothing fits: less than 1KB left, or the addresses are not even 1KB-aligned */
        if (allowable_pages[i].code == 0xff)
        {
            D(bug("\n[KRN] map_region failed\n"));
            return;
        }

        /* Get a free TLB entry */
        tlb = alloc_tlb();
        if (tlb == -1)
        {
            D(bug("\n[KRN] map_region: No more free TLB entries\n"));
            return;
        }
        //D(bug("\n[KRN] TLB%02x: %08x - %08x : %08x - %08x: ", tlb,
        //      physbase, physbase + allowable_pages[i].mask,
        //      virtbase, virtbase + allowable_pages[i].mask));

        /* Actually write the TLB entry */
        asm volatile("tlbwe %0,%3,0; tlbwe %1,%3,1; tlbwe %2,%3,2"
            ::"r"(virtbase | allowable_pages[i].code | TLB_V), "r"(physbase), "r"(prot), "r"(tlb));
        //D(bug("%08x %08x %08x ", virtbase | allowable_pages[i].code | 0x200, physbase, prot));
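
        /*
         * Note (added for clarity): the three tlbwe word selectors fill one
         * 440 TLB entry: word 0 takes the effective page number, SIZE and
         * the valid bit, word 1 the real (physical) page number, and word 2
         * the storage attributes and access permissions passed in `prot`.
         */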
        /* Advance past the chunk just mapped */
        length   -= allowable_pages[i].mask + 1;
        physbase += allowable_pages[i].mask + 1;
        virtbase += allowable_pages[i].mask + 1;
    }

    tlb_temp -= tlb_free;
    D(bug("%2d TLB%s\n", tlb_temp, tlb_temp > 1 ? "s" : ""));
}
AROS_LH1(void *, KrnVirtualToPhysical,
         AROS_LHA(void *, virtual, A0),
         struct KernelBase *, KernelBase, 20, Kernel)
{
    AROS_LIBFUNC_INIT

    uintptr_t virt = (uintptr_t)virtual;
    uintptr_t phys = virt;

    /* The kernel lives at 0xff000000 + physical address; everything else is mapped 1:1 */
    if (virt >= 0xff000000)
        phys = virt - 0xff000000;

    return (void *)phys;

    AROS_LIBFUNC_EXIT
}
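
/*
 * Example (illustrative, hypothetical addresses): KrnVirtualToPhysical()
 * applied to 0xff001000 yields 0x00001000, while 0x00400000, lying in the
 * 1:1 mapped RAM region, is returned unchanged.
 */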
void mmu_init(struct TagItem *tags)
{
    uintptr_t krn_lowest  = krnGetTagData(KRN_KernelLowest,  0, tags);
    uintptr_t krn_highest = krnGetTagData(KRN_KernelHighest, 0, tags);
    uintptr_t krn_base    = krnGetTagData(KRN_KernelBase,    0, tags);

    D(bug("[KRN] MMU Init\n"));
    D(bug("[KRN] lowest = %p, base = %p, highest = %p\n", krn_lowest, krn_base, krn_highest));
    D(bug("[KRN] Kernel size: %dKB code, %dKB data\n", (krn_highest - krn_base)/1024, (krn_base - krn_lowest)/1024));

    /*
     * In order to reduce the usage of TLB entries, align the kernel regions.
     * It wastes a tiny bit of RAM but saves a lot of TLB entries.
     */

    /* 4KB granularity for data sections */
    krn_lowest &= 0xfffff000;
    /* 64KB granularity for code sections */
    krn_highest = (krn_highest + 0xffff) & 0xffff0000;
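
    /*
     * Worked example (illustrative values): krn_lowest = 0x00ff8123 rounds
     * down to 0x00ff8000, and krn_highest = 0x01012345 rounds up to
     * 0x01020000, letting the regions be covered with fewer, larger pages.
     */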
    /*
     * The very first entry has to cover the executable part of the kernel,
     * where the exception handlers are located.
     */
    map_region(krn_base, 0xff000000 + krn_base, krn_highest - krn_base, TLB_SR | TLB_SX | TLB_UR | TLB_UX);
    /* Now the data area for the kernel. Make it read/write for both user and supervisor. No execution allowed. */
    map_region(krn_lowest, 0xff000000 + krn_lowest, krn_base - krn_lowest, TLB_SR | TLB_SW | TLB_UR | TLB_UW);
    /* Low memory is assigned read/write to supervisor mode. No access from user mode! */
    map_region(0, 0xff000000, krn_lowest, TLB_SR | TLB_SW);

    /* The regular RAM: map 1GB of it - the amcc440 cannot do more. */
    map_region(krn_highest, krn_highest, 0x40000000 - krn_highest, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_SX | TLB_UX);
    /* Map the PCI bus */
    map_region(0xa0000000, 0xa0000000, 0x40000000, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_G | TLB_I);
    /* PCI control registers and on-board devices */
    map_region(0xe0000000, 0xe0000000, 0x10000000, TLB_SR | TLB_SW | TLB_UR | TLB_UW | TLB_G | TLB_I);
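
    /*
     * Summary of the layout established above (added for clarity):
     *   0x00000000..krn_lowest-1  -> 0xff000000+  supervisor-only RW
     *   krn_lowest..krn_base-1    -> 0xff000000+  kernel data, RW, no-exec
     *   krn_base..krn_highest-1   -> 0xff000000+  kernel code, read/exec
     *   krn_highest..0x3fffffff   -> 1:1          general RAM, RWX
     *   0xa0000000..0xdfffffff    -> 1:1          PCI bus, guarded, cache-inhibited
     *   0xe0000000..0xefffffff    -> 1:1          PCI control + on-board devices
     */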
    D(bug("[KRN] TLB status: %d used, %d free\n", 64 - tlb_free, tlb_free));

    /* Flush the TLB shadow registers so the new entries take effect */
    asm volatile("isync;");
}