/*
** Copyright 2008, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
6 #include "stage2_priv.h"
// working pagedir and pagetables
static unsigned long *pgtable0 = 0;       // top level page directory (PML4), physical == virtual here
static unsigned long *pgtables_user = 0;  // low-memory identity-map tables (see setup_identity_map)
13 static void setup_identity_map(void)
15 // build a set of page tables to keep the low identity map
16 // the kernel will blow this away shortly
17 pgtables_user
= (unsigned long *)0x11000; // use this random spot
19 // clear out all of these page tables
20 memset(pgtables_user
, 0, PAGE_SIZE
* 2);
22 // point the top level page directory at it
23 pgtable0
[0] = (addr_t
)pgtables_user
| DEFAULT_PAGE_FLAGS
;
25 // second level table points to the next
26 pgtables_user
[0] = ((addr_t
)pgtables_user
+ PAGE_SIZE
) | DEFAULT_PAGE_FLAGS
;
28 // third level page table identity maps 1GB using 2MB pages
30 for (i
=0; i
< 512; i
++)
31 pgtables_user
[512 + i
] = (i
* 0x200000) | (1<<7) | DEFAULT_PAGE_FLAGS
;
34 static unsigned long *alloc_pagetable(kernel_args
*ka
)
39 table
= (unsigned long *)next_paddr
;
40 next_paddr
+= PAGE_SIZE
;
41 ka
->arch_args
.pgtables
[ka
->arch_args
.num_pgtables
++] = (addr_t
)table
;
44 for(i
= 0; i
< 512; i
++)
47 // dprintf("alloc_pagetable returning %p\n", table);
52 // allocate enough page tables to allow mapping of kernel and identity mapping of user space
54 // kernel maps at 0xffffffff00000000 (top of 64bit - 4GB)
55 int mmu_init(kernel_args
*ka
)
57 ka
->arch_args
.num_pgtables
= 0;
59 // allocate a new top level pgdir
60 pgtable0
= alloc_pagetable(ka
);
61 ka
->arch_args
.phys_pgdir
= (addr_t
)pgtable0
;
63 // set up the identity map of low ram
66 dprintf("switching page dirs: new one at %p\n", pgtable0
);
68 // switch to the new pgdir
70 "mov %%rax, %%cr3;" :: "m" (pgtable0
) : "rax");
72 dprintf("done switching page dirs\n");
// Index of the entry for vaddr within each level of the 4-level page
// table tree: 9 bits per level, 512 entries per table (levels 0..3 =
// outermost..innermost).
#define PGTABLE0_ENTRY(vaddr) (((vaddr) >> 39) & 0x1ff)
#define PGTABLE1_ENTRY(vaddr) (((vaddr) >> 30) & 0x1ff)
#define PGTABLE2_ENTRY(vaddr) (((vaddr) >> 21) & 0x1ff)
#define PGTABLE3_ENTRY(vaddr) (((vaddr) >> 12) & 0x1ff)

// physical address held in a page table entry (masks off the low flag
// bits and the top bit)
#define PGENT_TO_ADDR(ent) ((ent) & 0x7ffffffffffff000UL)
// present bit (bit 0) of a page table entry
#define PGENT_PRESENT(ent) ((ent) & 0x1)
85 static unsigned long *lookup_pgtable_entry(addr_t vaddr
)
87 vaddr
&= ~(PAGE_SIZE
-1);
89 // dprintf("lookup_pgtable_entry: vaddr = (%d, %d, %d, %d)\n",
90 // PGTABLE0_ENTRY(vaddr),
91 // PGTABLE1_ENTRY(vaddr),
92 // PGTABLE2_ENTRY(vaddr),
93 // PGTABLE3_ENTRY(vaddr));
95 // dive into the kernel page tables, allocating as they come up
96 unsigned long *pgtable
= pgtable0
;
97 unsigned long *ent
= &pgtable
[PGTABLE0_ENTRY(vaddr
)];
99 if (!PGENT_PRESENT(*ent
)) {
100 pgtable
= alloc_pagetable(ka
);
101 *ent
= (addr_t
)pgtable
| DEFAULT_PAGE_FLAGS
;
103 // dprintf("existing ent 0x%lx\n", *ent);
104 pgtable
= (unsigned long *)PGENT_TO_ADDR(*ent
);
106 // dprintf("pgtable_addr 0 %p\n", pgtable);
108 ent
= &pgtable
[PGTABLE1_ENTRY(vaddr
)];
109 if (!PGENT_PRESENT(*ent
)) {
110 pgtable
= alloc_pagetable(ka
);
111 *ent
= (addr_t
)pgtable
| DEFAULT_PAGE_FLAGS
;
113 // dprintf("existing ent 0x%lx\n", *ent);
114 pgtable
= (unsigned long *)PGENT_TO_ADDR(*ent
);
116 // dprintf("pgtable_addr 1 %p\n", pgtable);
118 ent
= &pgtable
[PGTABLE2_ENTRY(vaddr
)];
119 if (!PGENT_PRESENT(*ent
)) {
120 pgtable
= alloc_pagetable(ka
);
121 *ent
= (addr_t
)pgtable
| DEFAULT_PAGE_FLAGS
;
123 // dprintf("existing ent 0x%lx\n", *ent);
124 pgtable
= (unsigned long *)PGENT_TO_ADDR(*ent
);
126 // dprintf("pgtable_addr 2 %p\n", pgtable);
130 return &pgtable
[PGTABLE3_ENTRY(vaddr
)];
133 // can only map in kernel space
134 void mmu_map_page(addr_t vaddr
, addr_t paddr
)
136 // dprintf("mmu_map_page: vaddr 0x%lx, paddr 0x%lx\n", vaddr, paddr);
138 unsigned long *pgtable_entry
;
140 pgtable_entry
= lookup_pgtable_entry(vaddr
);
142 paddr
&= ~(PAGE_SIZE
-1);
143 *pgtable_entry
= paddr
| DEFAULT_PAGE_FLAGS
;