3 #include "kernel_base.h"
4 #include "kernel_bootmem.h"
5 #include "kernel_debug.h"
6 #include "kernel_intern.h"
/*
 * core_SetupMMU - (re)build the long-mode page tables that identity-map the
 * first 4GB of the address space using 2MB pages, then load them into CR3.
 *
 * Allocations happen only once: if __KernBootPrivate->PML4 is already set,
 * the existing tables are re-initialized in place.
 *
 * NOTE(review): this listing is an incomplete extraction — the enclosing
 * braces, the local declarations of PML4/PDP/PDE/i/j, and several PDP/PDE
 * flag-setting statements present in the original file were dropped. Only
 * the tokens shown below are verified; confirm the gaps against the
 * original source before relying on this text.
 */
void core_SetupMMU(struct KernBootPrivate *__KernBootPrivate)
    D(bug("[Kernel] core_SetupMMU: Re-creating the MMU pages for first 4GB area\n"));

    /* First call only: allocate the page-table storage from boot memory. */
    if (!__KernBootPrivate->PML4)
        /*
         * Allocate MMU pages and directories. Four PDE directories (PDE2M structures)
         * are enough to map whole 4GB address space.
         */
        /* One page-aligned table of 512 entries per level; 4 PDE tables (2MB pages),
           and 32 PTE tables kept in reserve for later 2MB -> 4K page splitting. */
        __KernBootPrivate->PML4 = krnAllocBootMemAligned(sizeof(struct PML4E) * 512, PAGE_SIZE);
        __KernBootPrivate->PDP = krnAllocBootMemAligned(sizeof(struct PDPE) * 512, PAGE_SIZE);
        __KernBootPrivate->PDE = krnAllocBootMemAligned(sizeof(struct PDE2M) * 512 * 4, PAGE_SIZE);
        __KernBootPrivate->PTE = krnAllocBootMemAligned(sizeof(struct PTE) * 512 * 32, PAGE_SIZE);

        D(bug("[Kernel] Allocated PML4 0x%p, PDP 0x%p, PDE 0x%p PTE 0x%p\n", __KernBootPrivate->PML4, __KernBootPrivate->PDP, __KernBootPrivate->PDE, __KernBootPrivate->PTE));

    PML4 = __KernBootPrivate->PML4;
    PDP = __KernBootPrivate->PDP;
    PDE = __KernBootPrivate->PDE;

    /* PML4 Entry - we need only the first out of 16 entries */
    PML4[0].p = 1; /* present */
    PML4[0].rw = 1; /* read/write */
    PML4[0].us = 1; /* accessible for user */
    PML4[0].pwt = 0; /* write-through cache */
    PML4[0].pcd = 0; /* cache enabled */
    PML4[0].a = 0; /* not yet accessed */
    PML4[0].mbz = 0; /* must be zero */
    /* Physical address of the PDP table, split into the low (bits 12..31)
       and high (bits 32..51) bitfields of the entry. */
    PML4[0].base_low = (unsigned long)PDP >> 12;
    PML4[0].base_high = ((unsigned long)PDP >> 32) & 0x000FFFFF;

    /* PDP Entries. There are four of them used in order to define 2048 pages of 2MB each. */
    for (i = 0; i < 4; i++)
        /* Each PDP entry gets its own table of 512 2MB-page PDEs. */
        struct PDE2M *pdes = &PDE[512 * i];

        /* Set the PDP entry up and point to the PDE table */
        /* NOTE(review): the p/rw/us/etc. flag assignments for PDP[i] were lost
           in extraction here — only the base address stores survive. */
        PDP[i].base_low = (unsigned long)pdes >> 12;
        PDP[i].base_high = ((unsigned long)pdes >> 32) & 0x000FFFFF;

        for (j = 0; j < 512; j++)
            /* Set PDE entries - use 2MB memory pages, with full supervisor and user access */
            /*
             * NOTE(review): if i is a plain int, (i << 30) overflows signed int
             * for i == 2,3 (undefined behavior); upstream later fixed this with
             * an unsigned long cast before the shift — confirm and apply.
             */
            unsigned long base = (i << 30) + (j << 21);

            /* 2MB-page PDE keeps physical bits 13..31 in base_low (bit 12 is PAT). */
            pdes[j].base_low = base >> 13;
            pdes[j].base_high = (base >> 32) & 0x000FFFFF;

    /* No PTE tables handed out yet; core_ProtPage consumes them from index 0. */
    __KernBootPrivate->used_page = 0;

    D(bug("[Kernel] core_SetupMMU: Registering New PML4 @ 0x%p\n", __KernBootPrivate->PML4));
    /* Activate the new mappings: load PML4 physical address into CR3. */
    wrcr(cr3, __KernBootPrivate->PML4);

    D(bug("[Kernel] core_SetupMMU: Done\n"));
/*
 * core_ProtPage - change the protection of the single 4K page containing
 * addr: present (p), writable (rw) and user-accessible (us) bits.
 *
 * Walks PML4 -> PDP -> PDE for addr; when the covering mapping is still a
 * 2MB page it is first split into 512 4K PTEs (taken from the preallocated
 * __KernBootPrivate->PTE pool via used_page++) that replicate the 2MB page's
 * attributes, after which the target PTE's bits are set and the TLB entry
 * is flushed with invlpg.
 *
 * NOTE(review): incomplete extraction — braces, the declarations of pte and
 * i, the big-page test (presumably `if (pde[pde_off].ps)` — confirm), the
 * per-PTE p-bit / base increment inside the split loop, and the tmp_pde
 * flag updates were dropped. Only the surviving tokens below are verified.
 */
void core_ProtPage(intptr_t addr, char p, char rw, char us)
    /* 9-bit table indices for each level of the 4-level walk. */
    unsigned long pml4_off = (addr >> 39) & 0x1ff;
    unsigned long pdpe_off = (addr >> 30) & 0x1ff;
    unsigned long pde_off = (addr >> 21) & 0x1ff;
    unsigned long pte_off = (addr >> 12) & 0x1ff;

    struct PML4E *pml4 = __KernBootPrivate->PML4;
    /* Reassemble each next-level table's address from the entry's split
       base_low (bits 12..31) / base_high (bits 32..) bitfields. */
    struct PDPE *pdpe = (struct PDPE *)((pml4[pml4_off].base_low << 12) | ((unsigned long)pml4[pml4_off].base_high << 32));
    struct PDE4K *pde = (struct PDE4K *)((pdpe[pdpe_off].base_low << 12) | ((unsigned long)pdpe[pdpe_off].base_high << 32));
    struct PTE *Pages4K = __KernBootPrivate->PTE;

    DMMU(bug("[Kernel] Marking page 0x%p as read-only\n", addr));

    /* --- 2MB -> 4K split path (guard condition lost in extraction) --- */
    /* work on local copy of the affected PDE */
    struct PDE4K tmp_pde = pde[pde_off];
    /* View the same entry as a 2MB-page PDE to read its base address:
       base_low holds physical bits 13..31 (bit 12 is PAT in the 2MB format). */
    struct PDE2M *pde2 = (struct PDE2M *)pde;
    intptr_t base = (pde2[pde_off].base_low << 13) | ((unsigned long)pde2[pde_off].base_high << 32);

    /* Grab the next unused 512-entry PTE table from the boot-time pool. */
    pte = &Pages4K[512 * __KernBootPrivate->used_page++];

    D(bug("[Kernel] The page for address 0x%p was a big one. Splitting it into 4K pages\n", addr));
    D(bug("[Kernel] Base=0x%p, pte=0x%p\n", base, pte));

    /* Copy the 2MB page's attributes into all 512 replacement 4K PTEs.
       NOTE(review): the p-bit assignment and the per-iteration base advance
       (base += PAGE_SIZE, presumably) were dropped here — confirm. */
    for (i = 0; i < 512; i++)
        pte[i].rw = pde2[pde_off].rw;
        pte[i].us = pde2[pde_off].us;
        pte[i].pwt = pde2[pde_off].pwt;
        pte[i].pcd = pde2[pde_off].pcd;
        pte[i].base_low = base >> 12;
        pte[i].base_high = (base >> 32) & 0x0FFFFF;

    /* Repoint the PDE at the new PTE table (4K-page PDE format: bits 12..31
       in base_low), then publish the updated entry in one store. */
    tmp_pde.base_low = (intptr_t)pte >> 12;
    tmp_pde.base_high = ((intptr_t)pte >> 32) & 0x0FFFFF;

    pde[pde_off] = tmp_pde;
    /* --- end split path --- */

    /* Locate the PTE table the PDE now points to and set the requested bits. */
    pte = (struct PTE *)((pde[pde_off].base_low << 12) | ((unsigned long)pde[pde_off].base_high << 32));

    pte[pte_off].rw = rw ? 1:0;
    pte[pte_off].us = us ? 1:0;
    pte[pte_off].p = p ? 1:0;
    /* Flush the stale translation for this page from the TLB. */
    asm volatile ("invlpg (%0)"::"r"(addr));
/*
 * core_ProtKernelArea - apply present/read-write/user protection bits to an
 * address range [addr, addr + length) by calling core_ProtPage per 4K page.
 *
 * NOTE(review): incomplete extraction — the loop that advances addr and
 * decrements length (presumably `while (length > 0) { ...; addr += 4096;
 * length -= 4096; }`) was dropped; only the debug print and one
 * core_ProtPage call survive. Confirm against the original source.
 */
void core_ProtKernelArea(intptr_t addr, intptr_t length, char p, char rw, char us)
    D(bug("[Kernel] Protecting area 0x%p - 0x%p\n", addr, addr + length - 1));

    core_ProtPage(addr, p, rw, us);