/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <asm/cpu.h>
#include <exec/types.h>

#include "kernel_base.h"
#include "kernel_intern.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "apic.h"

#define D(x)
#define DMMU(x)
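
/*
 * D() and DMMU() are debug wrappers that are compiled out here; defining
 * them as their argument (e.g. #define D(x) x) re-enables the bug() output.
 */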

void core_InitMMU(struct CPUMMUConfig *MMU)
{
    struct PML4E *PML4;
    struct PDPE *PDP;
    struct PDE2M *PDE;
    unsigned int i;

    PML4 = MMU->mmu_PML4;
    PDP = MMU->mmu_PDP;
    PDE = MMU->mmu_PDE;

    /* PML4 Entry - we need only the first out of the 512 entries */
    PML4[0].p  = 1; /* present */
    PML4[0].rw = 1; /* read/write */
    PML4[0].us = 1; /* accessible for user */
    PML4[0].pwt= 0; /* write-through caching disabled */
    PML4[0].pcd= 0; /* cache enabled */
    PML4[0].a  = 0; /* not yet accessed */
    PML4[0].mbz= 0; /* must be zero */
    PML4[0].base_low = ((IPTR)PDP) >> 12;
    PML4[0].avl= 0;
    PML4[0].nx = 0;
    PML4[0].avail = 0;
    PML4[0].base_high = (((IPTR)PDP) >> 32) & 0x000FFFFF;
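
    /*
     * A single PML4 entry covers 512GB (2^39 bytes) of virtual address
     * space, so entry 0 alone is sufficient for the mapping built below.
     */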
    for (i = 0; i < MMU->mmu_PDEPageCount; i++)
    {
        /* For every 512th page create the directory entry */
        if ((i % 512) == 0)
        {
            IPTR pdes = (IPTR)&PDE[i];
            int idx = i / 512;

            /* Set the PDP entry up and point to the PDE table */
            PDP[idx].p  = 1;
            PDP[idx].rw = 1;
            PDP[idx].us = 1;
            PDP[idx].pwt= 0;
            PDP[idx].pcd= 0;
            PDP[idx].a  = 0;
            PDP[idx].mbz= 0;
            PDP[idx].base_low = pdes >> 12;
            PDP[idx].nx = 0;
            PDP[idx].avail = 0;
            PDP[idx].base_high = (pdes >> 32) & 0x000FFFFF;
        }

        /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
        unsigned long base = (((IPTR)i) << 21);

        PDE[i].p  = 1;
        PDE[i].rw = 1;
        PDE[i].us = 1;
        PDE[i].pwt= 0; // 1
        PDE[i].pcd= 0; // 1
        PDE[i].a  = 0;
        PDE[i].d  = 0;
        PDE[i].g  = 0;
        PDE[i].pat= 0;
        PDE[i].ps = 1;
        PDE[i].base_low = base >> 13;
        PDE[i].avail = 0;
        PDE[i].nx = 0;
        PDE[i].base_high = (base >> 32) & 0x000FFFFF;
    }
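
    /*
     * The loop above identity-maps the first mmu_PDEPageCount 2MB pages:
     * virtual address i << 21 translates to the same physical address.
     */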

#if 0
    /* PDP Entries. There are four of them used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];
        unsigned int j;

        /* Set the PDP entry up and point to the PDE table */
        PDP[i].p  = 1;
        PDP[i].rw = 1;
        PDP[i].us = 1;
        PDP[i].pwt= 0;
        PDP[i].pcd= 0;
        PDP[i].a  = 0;
        PDP[i].mbz= 0;
        PDP[i].base_low = (unsigned long)pdes >> 12;
        PDP[i].nx = 0;
        PDP[i].avail = 0;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;

        for (j = 0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            unsigned long base = (i << 30) + (j << 21);

            pdes[j].p  = 1;
            pdes[j].rw = 1;
            pdes[j].us = 1;
            pdes[j].pwt= 0; // 1
            pdes[j].pcd= 0; // 1
            pdes[j].a  = 0;
            pdes[j].d  = 0;
            pdes[j].g  = 0;
            pdes[j].pat= 0;
            pdes[j].ps = 1;
            pdes[j].base_low = base >> 13;
            pdes[j].avail = 0;
            pdes[j].nx = 0;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }
#endif
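
    /*
     * Reset the counter of 4K page tables handed out so far; core_ProtPage()
     * below increments it each time it splits a 2MB page into 4K pages.
     */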
    MMU->mmu_PDEPageUsed = 0;
}

void core_LoadMMU(struct CPUMMUConfig *MMU)
{
    D(bug("[Kernel] %s: Registering PML4 @ 0x%p\n", __func__, MMU->mmu_PML4));
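
    /*
     * Writing CR3 installs the new PML4 as the active page-table root and
     * flushes all non-global TLB entries.
     */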
    wrcr(cr3, MMU->mmu_PML4);
}

void core_SetupMMU(struct CPUMMUConfig *MMU, IPTR memtop)
{
    /*
     * How many PDE entries shall be created? The default is 2048 (4GB),
     * unless more RAM is available...
     */
    MMU->mmu_PDEPageCount = 2048;

    /* Does RAM exceed 4GB? If so, adjust the number of PDE pages. */
    if (((memtop + (1 << 21) - 1) >> 21) > MMU->mmu_PDEPageCount)
        MMU->mmu_PDEPageCount = (memtop + (1 << 21) - 1) >> 21;
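
    /*
     * Worked example: for memtop = 0x180000000 (6GB) the rounded-up count
     * is (0x180000000 + 0x1FFFFF) >> 21 = 3072 entries of 2MB each.
     */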

    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first %dMB area\n", MMU->mmu_PDEPageCount << 1));

    if (!MMU->mmu_PML4)
    {
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M structures)
         * are enough to map the whole 4GB address space.
         */
        MMU->mmu_PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        MMU->mmu_PDP  = krnAllocBootMemAligned(sizeof(struct PDPE) * 512, PAGE_SIZE);
        MMU->mmu_PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * MMU->mmu_PDEPageCount, PAGE_SIZE);
        MMU->mmu_PTE  = krnAllocBootMemAligned(sizeof(struct PTE) * 512 * 32, PAGE_SIZE);
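
        /*
         * Assuming 8-byte table entries, each 512-entry table occupies
         * exactly one 4K page; the PTE pool of 512 * 32 entries gives
         * core_ProtPage() 32 spare tables for splitting 2MB pages.
         */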

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", MMU->mmu_PML4, MMU->mmu_PDP, MMU->mmu_PDE, MMU->mmu_PTE));
    }

    core_InitMMU(MMU);

    core_LoadMMU(MMU);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
}

void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    struct CPUMMUConfig *MMU;
    struct PML4E *pml4;
    struct PDPE *pdpe;
    struct PDE4K *pde;
    struct PTE *Pages4K;
    struct PTE *pte;

    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;
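
    /*
     * Each index selects one of 512 entries per table level. For example,
     * addr = 0x00200000 decomposes into pml4_off = 0, pdpe_off = 0,
     * pde_off = 1, pte_off = 0: the second 2MB page of the address space.
     */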

    DMMU(bug("[Kernel] Changing protection of page 0x%p\n", addr));

    MMU = &__KernBootPrivate->MMU;
    pml4 = MMU->mmu_PML4;
    pdpe = (struct PDPE *)((((IPTR)pml4[pml4_off].base_low) << 12) | (((IPTR)pml4[pml4_off].base_high) << 32));
    pde = (struct PDE4K *)((((IPTR)pdpe[pdpe_off].base_low) << 12) | (((IPTR)pdpe[pdpe_off].base_high) << 32));
    Pages4K = MMU->mmu_PTE;

    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = ((IPTR)pde2[pde_off].base_low << 13) | ((IPTR)pde2[pde_off].base_high << 32);
        int i;

        pte = &Pages4K[512 * MMU->mmu_PDEPageUsed++];

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));

        for (i = 0; i < 512; i++)
        {
            pte[i].p   = 1;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            base += PAGE_SIZE;
        }

        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;
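
        /*
         * Write the modified entry back in a single store: the 2MB mapping
         * is replaced by a pointer to the freshly filled table of 512
         * equivalent 4K pages.
         */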
        pde[pde_off] = tmp_pde;
    }

    pte = (struct PTE *)((((IPTR)pde[pde_off].base_low) << 12) | (((IPTR)pde[pde_off].base_high) << 32));

    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p ? 1 : 0;
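
    /* Invalidate the (single) cached TLB translation for this page */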
    asm volatile ("invlpg (%0)"::"r"(addr));
}

void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr += 4096;
        length -= 4096;
    }
}
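
#if 0
/*
 * Usage sketch (illustrative only, not part of the original file; the symbol
 * names below are hypothetical): write-protect a kernel code region while
 * keeping it present and supervisor-only.
 */
core_ProtKernelArea((intptr_t)kernel_text_start, kernel_text_size,
                    1 /* present */, 0 /* read-only */, 0 /* supervisor */);
#endif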