[newos.git] / boot / pc / x86_64 / mmu.c
/*
** Copyright 2008, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <string.h>
#include "stage2_priv.h"
// working pagedir and pagetables
static unsigned long *pgtable0 = 0;
static unsigned long *pgtables_user = 0;
static void setup_identity_map(void)
{
	// build a set of page tables to keep the low identity map
	// the kernel will blow this away shortly
	pgtables_user = (unsigned long *)0x11000; // use this random spot

	// clear out all of these page tables
	memset(pgtables_user, 0, PAGE_SIZE * 2);

	// point the top level page directory at it
	pgtable0[0] = (addr_t)pgtables_user | DEFAULT_PAGE_FLAGS;

	// second level table points to the next
	pgtables_user[0] = ((addr_t)pgtables_user + PAGE_SIZE) | DEFAULT_PAGE_FLAGS;

	// third level page table identity maps 1GB using 2MB pages
	// (bit 7 is the PS bit, selecting a large page)
	int i;
	for (i = 0; i < 512; i++)
		pgtables_user[512 + i] = (i * 0x200000) | (1<<7) | DEFAULT_PAGE_FLAGS;
}
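/*
 * Worked example of the tables built above (a sketch of the arithmetic,
 * using the 0x11000/0x12000 placement chosen in setup_identity_map):
 *
 *   vaddr 0x00345678 -> pgtable0[0]       -> table at 0x11000
 *                    -> 0x11000 table [0] -> table at 0x12000
 *                    -> 0x12000 table [1] =  0x200000 | PS | flags (2MB page)
 *                    -> paddr = 0x200000 + (0x345678 & 0x1fffff) = 0x345678
 *
 * i.e. every address below 1GB maps to itself.
 */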
static unsigned long *alloc_pagetable(kernel_args *ka)
{
	int i;
	unsigned long *table;

	// grab the next free physical page (next_paddr is stage2's physical
	// bump-allocator cursor, presumably shared via stage2_priv.h) and
	// record it so the kernel can find all boot-time page tables later
	table = (unsigned long *)next_paddr;
	next_paddr += PAGE_SIZE;
	ka->arch_args.pgtables[ka->arch_args.num_pgtables++] = (addr_t)table;

	// clear it out
	for (i = 0; i < 512; i++)
		table[i] = 0;

	return table;
}
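/*
 * Sketch (not in the original): pgtables[] is a fixed-size array, so a
 * checked variant could guard against overflowing it. MAX_BOOT_PTABLES is an
 * assumed name for its capacity; the real constant lives with kernel_args.
 */
#if 0
static unsigned long *alloc_pagetable_checked(kernel_args *ka)
{
	if (ka->arch_args.num_pgtables >= MAX_BOOT_PTABLES) { // assumed constant
		dprintf("alloc_pagetable: out of pgtable slots\n");
		for (;;)
			;
	}
	return alloc_pagetable(ka);
}
#endif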
// allocate enough page tables to allow mapping of kernel and identity mapping of user space
// kernel maps at 0xffffffff00000000 (top of 64bit - 4GB)
int mmu_init(kernel_args *ka)
{
	ka->arch_args.num_pgtables = 0;

	// allocate a new top level pgdir
	pgtable0 = alloc_pagetable(ka);

	// set up the identity map of low ram
	setup_identity_map();

	dprintf("switching page dirs: new one at %p\n", pgtable0);

	// switch to the new pgdir (load its physical address into cr3)
	asm("mov %0, %%rax;"
		"mov %%rax, %%cr3;" :: "m" (pgtable0) : "rax");

	dprintf("done switching page dirs\n");

	return 0;
}
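/*
 * Sketch (alternative, not in the original): the cr3 load above could also
 * be a single instruction with a register operand, assuming a GCC-style
 * compiler. Shown under #if 0 as an illustration only.
 */
#if 0
	asm volatile("mov %0, %%cr3" :: "r" ((addr_t)pgtable0) : "memory");
#endif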
// index of each paging level for a given virtual address
// (level 0 = PML4, 1 = PDPT, 2 = page directory, 3 = page table)
#define PGTABLE0_ENTRY(vaddr) (((vaddr) >> 39) & 0x1ff)
#define PGTABLE1_ENTRY(vaddr) (((vaddr) >> 30) & 0x1ff)
#define PGTABLE2_ENTRY(vaddr) (((vaddr) >> 21) & 0x1ff)
#define PGTABLE3_ENTRY(vaddr) (((vaddr) >> 12) & 0x1ff)

// mask an entry down to its physical address (strips the low flag bits and NX)
#define PGENT_TO_ADDR(ent) ((ent) & 0x7ffffffffffff000UL)
#define PGENT_PRESENT(ent) ((ent) & 0x1)
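/*
 * Worked example of the index macros for the kernel base named above,
 * vaddr = 0xffffffff00000000:
 *
 *   PGTABLE0_ENTRY = (vaddr >> 39) & 0x1ff = 0x1ff   (last PML4 slot)
 *   PGTABLE1_ENTRY = (vaddr >> 30) & 0x1ff = 0x1fc
 *   PGTABLE2_ENTRY = (vaddr >> 21) & 0x1ff = 0x000
 *   PGTABLE3_ENTRY = (vaddr >> 12) & 0x1ff = 0x000
 *
 * PGENT_TO_ADDR's 0x7ffffffffffff000 mask keeps address bits 12-62, dropping
 * the flag bits at the bottom and the NX bit (bit 63) at the top.
 */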
static unsigned long *lookup_pgtable_entry(addr_t vaddr)
{
	vaddr &= ~(PAGE_SIZE-1);

//	dprintf("lookup_pgtable_entry: vaddr = (%d, %d, %d, %d)\n",
//		PGTABLE0_ENTRY(vaddr),
//		PGTABLE1_ENTRY(vaddr),
//		PGTABLE2_ENTRY(vaddr),
//		PGTABLE3_ENTRY(vaddr));

	// dive into the kernel page tables, allocating as they come up
	// (ka here is the global kernel_args, presumably from stage2_priv.h)
	unsigned long *pgtable = pgtable0;
	unsigned long *ent = &pgtable[PGTABLE0_ENTRY(vaddr)];

	if (!PGENT_PRESENT(*ent)) {
		pgtable = alloc_pagetable(ka);
		*ent = (addr_t)pgtable | DEFAULT_PAGE_FLAGS;
	} else {
		pgtable = (unsigned long *)PGENT_TO_ADDR(*ent);
	}
//	dprintf("pgtable_addr 0 %p\n", pgtable);

	ent = &pgtable[PGTABLE1_ENTRY(vaddr)];
	if (!PGENT_PRESENT(*ent)) {
		pgtable = alloc_pagetable(ka);
		*ent = (addr_t)pgtable | DEFAULT_PAGE_FLAGS;
	} else {
		pgtable = (unsigned long *)PGENT_TO_ADDR(*ent);
	}
//	dprintf("pgtable_addr 1 %p\n", pgtable);

	ent = &pgtable[PGTABLE2_ENTRY(vaddr)];
	if (!PGENT_PRESENT(*ent)) {
		pgtable = alloc_pagetable(ka);
		*ent = (addr_t)pgtable | DEFAULT_PAGE_FLAGS;
	} else {
		pgtable = (unsigned long *)PGENT_TO_ADDR(*ent);
	}
//	dprintf("pgtable_addr 2 %p\n", pgtable);

	// now map it
	return &pgtable[PGTABLE3_ENTRY(vaddr)];
}
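/*
 * Sketch (not in the original): the three hand-unrolled levels above could
 * be folded into a loop with the same behavior. Kept under #if 0 as an
 * illustration only.
 */
#if 0
static unsigned long *lookup_pgtable_entry_loop(addr_t vaddr)
{
	static const int shift[] = { 39, 30, 21 };
	unsigned long *pgtable = pgtable0;
	int level;

	vaddr &= ~(PAGE_SIZE-1);
	for (level = 0; level < 3; level++) {
		unsigned long *ent = &pgtable[(vaddr >> shift[level]) & 0x1ff];
		if (!PGENT_PRESENT(*ent)) {
			pgtable = alloc_pagetable(ka);
			*ent = (addr_t)pgtable | DEFAULT_PAGE_FLAGS;
		} else {
			pgtable = (unsigned long *)PGENT_TO_ADDR(*ent);
		}
	}
	return &pgtable[PGTABLE3_ENTRY(vaddr)];
}
#endif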
// can only map in kernel space
void mmu_map_page(addr_t vaddr, addr_t paddr)
{
//	dprintf("mmu_map_page: vaddr 0x%lx, paddr 0x%lx\n", vaddr, paddr);

	unsigned long *pgtable_entry;

	pgtable_entry = lookup_pgtable_entry(vaddr);

	paddr &= ~(PAGE_SIZE-1);
	*pgtable_entry = paddr | DEFAULT_PAGE_FLAGS;
}
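/*
 * Usage sketch (assumed caller, not from this file): the rest of stage2
 * would bring the MMU up once and then map the kernel image page by page,
 * roughly
 *
 *   mmu_init(ka);
 *   for (i = 0; i < kernel_size; i += PAGE_SIZE)
 *       mmu_map_page(0xffffffff00000000UL + i, kernel_paddr + i);
 *
 * with any intermediate tables allocated on demand by lookup_pgtable_entry()
 * and recorded in ka->arch_args.pgtables[] for the kernel to reclaim.
 */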