/* AROS.git: arch/x86_64-pc/kernel/mmu.c */
#include <asm/cpu.h>

#include "kernel_base.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "kernel_intern.h"
#include "apic.h"

#define D(x)
#define DMMU(x)
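/*
 * Boot-time MMU setup for x86-64. core_SetupMMU() builds an identity
 * mapping of the first 4GB of address space out of 2MB pages, while
 * core_ProtPage()/core_ProtKernelArea() change the protection of
 * individual 4K pages, splitting 2MB pages on demand. D() and DMMU()
 * are debug macros; they compile to nothing here and can be defined
 * as (x) to route the messages through bug().
 */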
void core_SetupMMU(struct KernBootPrivate *__KernBootPrivate)
{
    unsigned int i;
    struct PML4E *PML4;
    struct PDPE  *PDP;
    struct PDE2M *PDE;

    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first 4GB area\n"));
    if (!__KernBootPrivate->PML4)
    {
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M structures)
         * are enough to map the whole 4GB address space.
         */
        __KernBootPrivate->PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        __KernBootPrivate->PDP  = krnAllocBootMemAligned(sizeof(struct PDPE) * 512, PAGE_SIZE);
        __KernBootPrivate->PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * 512 * 4, PAGE_SIZE);
        __KernBootPrivate->PTE  = krnAllocBootMemAligned(sizeof(struct PTE) * 512 * 32, PAGE_SIZE);

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", __KernBootPrivate->PML4, __KernBootPrivate->PDP, __KernBootPrivate->PDE, __KernBootPrivate->PTE));
    }
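    /*
     * Note: the PTE pool allocated above holds 32 page tables of 512 entries
     * each. They are not wired into the paging hierarchy here; core_ProtPage()
     * hands them out one at a time (via used_page) whenever a 2MB page has to
     * be split into 4K pages.
     */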
    PML4 = __KernBootPrivate->PML4;
    PDP  = __KernBootPrivate->PDP;
    PDE  = __KernBootPrivate->PDE;
    /* PML4 entry - we need only the first out of 512 entries, since a single entry covers 512GB */
    PML4[0].p   = 1;    /* present */
    PML4[0].rw  = 1;    /* read/write */
    PML4[0].us  = 1;    /* accessible for user */
    PML4[0].pwt = 0;    /* write-back caching (write-through off) */
    PML4[0].pcd = 0;    /* cache enabled */
    PML4[0].a   = 0;    /* not yet accessed */
    PML4[0].mbz = 0;    /* must be zero */
    PML4[0].base_low = (unsigned long)PDP >> 12;
    PML4[0].avl = 0;
    PML4[0].nx  = 0;
    PML4[0].avail = 0;
    PML4[0].base_high = ((unsigned long)PDP >> 32) & 0x000FFFFF;
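    /* The PDP table's physical address is stored as a 4K-aligned frame
       number, split across the base_low and base_high bitfields. */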
    /* PDP entries. Four of them are used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];
        unsigned int j;

        /* Set the PDP entry up and point it to the i-th PDE table */
        PDP[i].p   = 1;
        PDP[i].rw  = 1;
        PDP[i].us  = 1;
        PDP[i].pwt = 0;
        PDP[i].pcd = 0;
        PDP[i].a   = 0;
        PDP[i].mbz = 0;
        PDP[i].base_low = (unsigned long)pdes >> 12;
        PDP[i].avl = 0;
        PDP[i].nx  = 0;
        PDP[i].avail = 0;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;
        for (j = 0; j < 512; j++)
        {
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            unsigned long base = (i << 30) + (j << 21);

            pdes[j].p   = 1;
            pdes[j].rw  = 1;
            pdes[j].us  = 1;
            pdes[j].pwt = 0;    // 1
            pdes[j].pcd = 0;    // 1
            pdes[j].a   = 0;
            pdes[j].d   = 0;
            pdes[j].g   = 0;
            pdes[j].pat = 0;
            pdes[j].ps  = 1;    /* 2MB page */
            pdes[j].base_low = base >> 13;
            pdes[j].avail = 0;
            pdes[j].nx  = 0;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }
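    /*
     * The loops above produce an identity mapping: entry (i,j) maps virtual
     * address (i << 30) + (j << 21) onto the same physical address, i.e.
     * 4 PDP entries * 512 PDEs * 2MB = 4GB of 1:1 mapped memory.
     */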
    __KernBootPrivate->used_page = 0;

    D(bug("[Kernel] core_SetupMMU: Registering New PML4 @ 0x%p\n", __KernBootPrivate->PML4));
    /* Load CR3 with the new PML4; this activates the mapping and flushes non-global TLB entries */
    wrcr(cr3, __KernBootPrivate->PML4);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
}
void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;
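    /*
     * x86-64 4-level paging splits a virtual address into four 9-bit table
     * indices plus a 12-bit page offset. For example, addr = 0x00200000
     * yields pml4_off = 0, pdpe_off = 0, pde_off = 1, pte_off = 0.
     */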
    struct PML4E *pml4 = __KernBootPrivate->PML4;
    struct PDPE *pdpe = (struct PDPE *)((pml4[pml4_off].base_low << 12) | ((unsigned long)pml4[pml4_off].base_high << 32));
    struct PDE4K *pde = (struct PDE4K *)((pdpe[pdpe_off].base_low << 12) | ((unsigned long)pdpe[pdpe_off].base_high << 32));
    struct PTE *Pages4K = __KernBootPrivate->PTE;
    struct PTE *pte;

    DMMU(bug("[Kernel] Changing protection of page 0x%p\n", addr));
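    /*
     * The table pointers above are formed directly from the physical frame
     * numbers stored in the entries; dereferencing them only works because
     * the first 4GB are identity-mapped by core_SetupMMU(), so physical and
     * virtual addresses coincide.
     */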
    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = (pde2[pde_off].base_low << 13) | ((unsigned long)pde2[pde_off].base_high << 32);
        int i;

        /* Grab the next unused 512-entry PTE table from the boot-time pool */
        pte = &Pages4K[512 * __KernBootPrivate->used_page++];

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));
        for (i = 0; i < 512; i++)
        {
            pte[i].p   = 1;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            base += PAGE_SIZE;
        }

        /* Turn the PDE from a 2MB page into a pointer to the new PTE table */
        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;

        pde[pde_off] = tmp_pde;
    }
    /* The PDE now points to a PTE table in either case; locate and update the 4K page's entry */
    pte = (struct PTE *)((pde[pde_off].base_low << 12) | ((unsigned long)pde[pde_off].base_high << 32));

    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p ? 1 : 0;

    /* Invalidate the TLB entry for this page so the new protection takes effect */
    asm volatile ("invlpg (%0)"::"r"(addr));
}
void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr   += 4096;
        length -= 4096;
    }
}
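/*
 * Usage sketch (hypothetical address and size): to write-protect a 1MB
 * region of kernel code while keeping it present and supervisor-only:
 *
 *     core_ProtKernelArea(0x00200000, 0x00100000, 1, 0, 0);
 *
 * Every 4K page in the range ends up with p=1, rw=0, us=0; any 2MB page
 * covering the range is transparently split by core_ProtPage().
 */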