/*
    Copyright © 1995-2014, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <asm/cpu.h>

#include "kernel_base.h"
#include "kernel_bootmem.h"
#include "kernel_debug.h"
#include "kernel_intern.h"
#include "apic.h"

#define D(x)
#define DMMU(x)
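
/*
 * core_SetupMMU() builds an identity mapping of the first 4GB of physical
 * memory using 2MB pages: one PML4 entry points to one PDP table, whose
 * first four entries each point to a directory of 512 2MB PDEs
 * (4 x 512 x 2MB = 4GB).
 */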
void core_SetupMMU(struct KernBootPrivate *__KernBootPrivate)
{
    unsigned int i;
    struct PML4E *PML4;
    struct PDPE  *PDP;
    struct PDE2M *PDE;

    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first 4GB area\n"));

    if (!__KernBootPrivate->PML4)
    {
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M
         * structures) are enough to map the whole 4GB address space.
         */
        __KernBootPrivate->PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        __KernBootPrivate->PDP  = krnAllocBootMemAligned(sizeof(struct PDPE)  * 512, PAGE_SIZE);
        __KernBootPrivate->PDE  = krnAllocBootMemAligned(sizeof(struct PDE2M) * 512 * 4, PAGE_SIZE);
        __KernBootPrivate->PTE  = krnAllocBootMemAligned(sizeof(struct PTE)   * 512 * 32, PAGE_SIZE);
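
        /*
         * The PTE pool is not wired into the tables here. core_ProtPage()
         * draws 512-entry page tables from it whenever a 2MB page has to be
         * split, so up to 32 such splits can be performed.
         */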

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p, PTE 0x%p\n", __KernBootPrivate->PML4, __KernBootPrivate->PDP, __KernBootPrivate->PDE, __KernBootPrivate->PTE));
    }

    PML4 = __KernBootPrivate->PML4;
    PDP  = __KernBootPrivate->PDP;
    PDE  = __KernBootPrivate->PDE;

    /* PML4 entry - we need only the first of its 512 entries; it maps the low 512GB */
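    /* The PDP table's physical address is split: base_low holds bits 12-31, base_high bits 32-51 */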
    PML4[0].p   = 1;    /* present */
    PML4[0].rw  = 1;    /* read/write */
    PML4[0].us  = 1;    /* accessible for user */
    PML4[0].pwt = 0;    /* write-back caching (write-through off) */
    PML4[0].pcd = 0;    /* cache enabled */
    PML4[0].a   = 0;    /* not yet accessed */
    PML4[0].mbz = 0;    /* must be zero */
    PML4[0].base_low  = (unsigned long)PDP >> 12;
    PML4[0].avl = 0;
    PML4[0].nx  = 0;
    PML4[0].avail = 0;
    PML4[0].base_high = ((unsigned long)PDP >> 32) & 0x000FFFFF;

    /* PDP entries. Four of them are used to define 2048 pages of 2MB each (4 x 512 x 2MB = 4GB). */
    for (i = 0; i < 4; i++)
    {
        struct PDE2M *pdes = &PDE[512 * i];
        unsigned int j;

        /* Set the PDP entry up and point it at the PDE table */
        PDP[i].p   = 1;
        PDP[i].rw  = 1;
        PDP[i].us  = 1;
        PDP[i].pwt = 0;
        PDP[i].pcd = 0;
        PDP[i].a   = 0;
        PDP[i].mbz = 0;
        PDP[i].base_low  = (unsigned long)pdes >> 12;

        PDP[i].nx  = 0;
        PDP[i].avail = 0;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;

        for (j = 0; j < 512; j++)
        {
            /* Set the PDE entries - use 2MB memory pages with full supervisor and user access */
            unsigned long base = (i << 30) + (j << 21);
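            /* i selects the 1GB region and j the 2MB page within it, e.g. i=1, j=2 gives base 0x40400000 */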

            pdes[j].p   = 1;
            pdes[j].rw  = 1;
            pdes[j].us  = 1;
            pdes[j].pwt = 0;    // 1
            pdes[j].pcd = 0;    // 1
            pdes[j].a   = 0;
            pdes[j].d   = 0;
            pdes[j].g   = 0;
            pdes[j].pat = 0;
            pdes[j].ps  = 1;    /* PS set: this entry maps a 2MB page directly */
            pdes[j].base_low  = base >> 13;    /* bits 13-31; the low 8 bits are zero for a 2MB-aligned page */

            pdes[j].avail = 0;
            pdes[j].nx  = 0;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;
        }
    }

    __KernBootPrivate->used_page = 0;    /* no 4K page tables taken from the PTE pool yet */

    D(bug("[Kernel] core_SetupMMU: Registering New PML4 @ 0x%p\n", __KernBootPrivate->PML4));
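    /* Loading CR3 activates the new page tables and flushes all non-global TLB entries */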
    wrcr(cr3, __KernBootPrivate->PML4);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
}

void core_ProtPage(intptr_t addr, char p, char rw, char us)
{
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off  = (addr >> 21) & 0x1ff;
    unsigned long pte_off  = (addr >> 12) & 0x1ff;
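    /* Each level indexes 512 entries, e.g. addr 0x00401000 gives pml4_off 0, pdpe_off 0, pde_off 2, pte_off 1 */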

    struct PML4E *pml4 = __KernBootPrivate->PML4;
    struct PDPE  *pdpe = (struct PDPE *)((pml4[pml4_off].base_low << 12) | ((unsigned long)pml4[pml4_off].base_high << 32));
    struct PDE4K *pde  = (struct PDE4K *)((pdpe[pdpe_off].base_low << 12) | ((unsigned long)pdpe[pdpe_off].base_high << 32));
    struct PTE *Pages4K = __KernBootPrivate->PTE;
    struct PTE *pte;

    DMMU(bug("[Kernel] Changing protection of page 0x%p\n", addr));
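
    /*
     * If the address is still covered by a 2MB page, protection cannot be
     * changed for a single 4K page within it. Split the 2MB page into 512
     * 4K PTEs first, using a page table from the preallocated pool.
     */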
    if (pde[pde_off].ps)
    {
        /* Work on a local copy of the affected PDE */
        struct PDE4K tmp_pde = pde[pde_off];
        struct PDE2M *pde2 = (struct PDE2M *)pde;
        intptr_t base = (pde2[pde_off].base_low << 13) | ((unsigned long)pde2[pde_off].base_high << 32);
        int i;

        pte = &Pages4K[512 * __KernBootPrivate->used_page++];    /* next free 512-entry page table from the pool */

        D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
        D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));

        for (i = 0; i < 512; i++)
        {
            /* Each 4K page inherits the protection and caching bits of the original 2MB page */
            pte[i].p   = 1;
            pte[i].rw  = pde2[pde_off].rw;
            pte[i].us  = pde2[pde_off].us;
            pte[i].pwt = pde2[pde_off].pwt;
            pte[i].pcd = pde2[pde_off].pcd;
            pte[i].base_low  = base >> 12;
            pte[i].base_high = (base >> 32) & 0x0FFFFF;

            base += PAGE_SIZE;
        }

        /* Turn the PDE into a pointer to the new page table */
        tmp_pde.ps = 0;
        tmp_pde.base_low  = (intptr_t)pte >> 12;
        tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;
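
        /* Write the updated entry back in a single store, so the mapping is never seen half-updated */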
        pde[pde_off] = tmp_pde;
    }

    pte = (struct PTE *)((pde[pde_off].base_low << 12) | ((unsigned long)pde[pde_off].base_high << 32));

    pte[pte_off].rw = rw ? 1 : 0;
    pte[pte_off].us = us ? 1 : 0;
    pte[pte_off].p  = p  ? 1 : 0;
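
    /* Flush the stale TLB entry for this page so the new protection takes effect immediately */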
    asm volatile ("invlpg (%0)"::"r"(addr));
}

void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
{
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    while (length > 0)
    {
        core_ProtPage(addr, p, rw, us);
        addr += 4096;
        length -= 4096;
    }
}
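
/*
 * Usage sketch (illustrative only, not part of this file): after copying the
 * kernel image, boot code could write-protect a read-only section with
 * something like
 *
 *     core_ProtKernelArea((intptr_t)ro_start, ro_length, 1, 0, 0);
 *
 * where ro_start/ro_length are hypothetical names for the section bounds;
 * the real callers live elsewhere in the x86_64 kernel startup code.
 */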