/*
 * mm.c --
 *
 *      Base memory management functions.
 */

/* XXX: this file's a mess */

#include <marmot.h>
#define PHYSMEM_BITMAP_BASE (STACKTOP)

#define MAX_E820       64
#define E820_RAM       1
#define E820_RSVD      2
#define E820_ACPI_DATA 3
#define E820_ACPI_NVS  4
typedef enum {
    MEMTYPE_RAM,
    MEMTYPE_ROM,
    MEMTYPE_ACPI_DATA,
    MEMTYPE_ACPI_NVS,
    MEMTYPE_IO,
    MEMTYPE_HOLE
} MemRegionType;

struct e820 {
    uint64 base;
    uint64 length;
    uint32 type;
} __attribute__((packed));

extern struct e820 e820_map[MAX_E820];
extern uint64 e820_entries;
typedef uint64 PML4E;
typedef uint64 PDPE;
typedef uint64 PDE;
typedef uint64 PTE;

#define PAGE_ADDR_MASK 0x000ffffffffff000
#define PML4E_ADDR_MASK 0x000ffffffffff000
#define PML4E_NX        (1ULL << 63)
#define PML4E_A         (1 << 5)
#define PML4E_PCD       (1 << 4)
#define PML4E_PWT       (1 << 3)
#define PML4E_US        (1 << 2)
#define PML4E_RW        (1 << 1)
#define PML4E_P         (1 << 0)

#define PDPE_ADDR_MASK  0x000ffffffffff000
#define PDPE_NX         (1ULL << 63)
#define PDPE_A          (1 << 5)
#define PDPE_PCD        (1 << 4)
#define PDPE_PWT        (1 << 3)
#define PDPE_US         (1 << 2)
#define PDPE_RW         (1 << 1)
#define PDPE_P          (1 << 0)

#define PDE_ADDR_MASK   0x000ffffffffff000
#define PDE_NX          (1ULL << 63)
#define PDE_A           (1 << 5)
#define PDE_PCD         (1 << 4)
#define PDE_PWT         (1 << 3)
#define PDE_US          (1 << 2)
#define PDE_RW          (1 << 1)
#define PDE_P           (1 << 0)

#define PTE_ADDR_MASK   0x000ffffffffff000
#define PTE_NX          (1ULL << 63)
#define PTE_G           (1 << 8)
#define PTE_PAT         (1 << 7)
#define PTE_D           (1 << 6)
#define PTE_A           (1 << 5)
#define PTE_PCD         (1 << 4)
#define PTE_PWT         (1 << 3)
#define PTE_US          (1 << 2)
#define PTE_RW          (1 << 1)
#define PTE_P           (1 << 0)
#define PTE_ENTRIES (PAGE_SIZE / sizeof(PTE))

#define PTOFFSET(va) (((va) >> 12) & 0x1ff)

#define MKPML4E(pdp, flags) (((pdp) & PML4E_ADDR_MASK) | (flags))
#define MKPDPE(pd, flags)   (((pd) & PDPE_ADDR_MASK) | (flags))
#define MKPDE(pt, flags)    (((pt) & PDE_ADDR_MASK) | (flags))
#define MKPTE(pa, flags)    (((pa) & PTE_ADDR_MASK) | (flags))
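
/*
 * Illustrative sketch only (these helpers are not referenced elsewhere
 * in this file): index extraction for the upper paging levels, mirroring
 * PTOFFSET with the standard x86-64 9-bits-per-level layout that
 * WalkPT() below open-codes.
 */
#define PML4OFFSET(va) (((va) >> 39) & 0x1ff)
#define PDPOFFSET(va)  (((va) >> 30) & 0x1ff)
#define PDOFFSET(va)   (((va) >> 21) & 0x1ff)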
/*
 * Use the AVL bits (11-9) in each PTE to mark a page as used or free.
 */

#define MM_STATUS_MASK  0x0000000000000e00
#define MM_PAGE_USED    0x200
#define MM_PAGE_INVALID 0x400
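
/*
 * Minimal sketch of the intended encoding (assumed from the allocators
 * below): the status bits live in the PTE alongside the hardware bits.
 *
 *     PTE *pte = WalkPT(va);
 *
 *     if ((*pte & MM_STATUS_MASK) == 0) {
 *         // VA is free and may be handed out by PageAlloc/RegionAlloc
 *     } else if (*pte & MM_PAGE_USED) {
 *         // VA is allocated (possibly without a backing page yet)
 *     } else if (*pte & MM_PAGE_INVALID) {
 *         // VA covers ROM/IO/hole memory and must not be allocated
 *     }
 */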
#define CANONICAL_MASK 0x00007fffffffffff

#define BITMAP_SIZE(count) ((count) >> 3)
typedef uint8 MemoryPool;

typedef struct {
    PA base;
    PA limit;
    MemRegionType type;
} MemRegion;

#define MEMORY_REGIONS_MAX 64

static MemRegion memoryRegions[MEMORY_REGIONS_MAX];
static uint64 memoryRegionCount;

typedef struct {
    char *name;
    PA base;
    PA limit;
    uint64 count;
    uint8 *map;
} Pool;

Pool pools[4];

static char *poolNames[] = {
    "ident",
    "kernel",
    "priv",
    "user"
};
MemRegionType __init
GetMemType(PA addr)
{
    uint64 r;

    for (r = 0; r < memoryRegionCount; r++) {
        if (addr >= memoryRegions[r].base && addr < memoryRegions[r].limit) {
            return memoryRegions[r].type;
        }
    }

    return MEMTYPE_HOLE;
}
void __init
AddMemoryRegion(PA base, PA limit, MemRegionType type)
{
    if (memoryRegionCount == MEMORY_REGIONS_MAX) {
        // PANIC();
        return;     /* don't overrun the region table */
    }

    memoryRegions[memoryRegionCount].base = base;
    memoryRegions[memoryRegionCount].limit = limit;
    memoryRegions[memoryRegionCount].type = type;

    memoryRegionCount++;
}
/*
 * FixupE820 --
 *
 *      Sort through the e820 entries, ensuring they're in order, and
 *      joining adjacent identical entries. Then add the regions to
 *      the region map.
 */

void __init
FixupE820(void)
{
    uint64 d, j, k;
    int doneSorting;

    if (e820_entries < 2) {
        return;
    }

    /* sort the entries */
    doneSorting = 0;
    while (!doneSorting) {
        doneSorting = 1;

        for (k = 0; k < e820_entries - 1; k++) {
            if (e820_map[k].base > e820_map[k + 1].base) {
                struct e820 tmp;

                tmp.base = e820_map[k].base;
                tmp.length = e820_map[k].length;
                tmp.type = e820_map[k].type;

                e820_map[k].base = e820_map[k + 1].base;
                e820_map[k].length = e820_map[k + 1].length;
                e820_map[k].type = e820_map[k + 1].type;

                e820_map[k + 1].base = tmp.base;
                e820_map[k + 1].length = tmp.length;
                e820_map[k + 1].type = tmp.type;

                doneSorting = 0;
            }
        }
    }
    /* merge adjacent entries */
    k = 0;
    j = 1;
    while (k < e820_entries - 1) {
        if (((e820_map[k].base + e820_map[k].length) >= e820_map[j].base) &&
            (e820_map[k].type == e820_map[j].type)) {

            /* merge j into k */
            if (e820_map[k].base + e820_map[k].length <
                e820_map[j].base + e820_map[j].length) {
                /* second entry has a higher limit than the first */
                e820_map[k].length = ((e820_map[j].base + e820_map[j].length) -
                                      e820_map[k].base);
            } else {
                /* first entry entirely overlaps the second - do nothing */
            }

            /* move the rest of the entries down over the merged one */
            for (d = k + 1, j = k + 2; j < e820_entries; d++, j++) {
                e820_map[d].base = e820_map[j].base;
                e820_map[d].length = e820_map[j].length;
                e820_map[d].type = e820_map[j].type;
            }

            e820_entries--;
        } else {
            k++;
        }

        j = k + 1;
    }
    /* adjust to page boundaries */
    for (k = 0; k < e820_entries; k++) {

        if (e820_map[k].base & 0xfff) {
            if (e820_map[k].type == E820_RAM) {
                /* RAM - adjust base up */
                e820_map[k].length -= e820_map[k].base & 0xfff;
                e820_map[k].base = (e820_map[k].base & (~0xfffULL)) + 4096;
            } else {
                /* otherwise adjust down */
                e820_map[k].length += e820_map[k].base & 0xfff;
                e820_map[k].base = e820_map[k].base & (~0xfffULL);
            }
        }

        if (((e820_map[k].base + e820_map[k].length) & 0xfff) &&
            e820_map[k].type == E820_RAM) {
            /* adjust limit down */
            e820_map[k].length -= (e820_map[k].base + e820_map[k].length) & 0xfff;
        }
    }
}
void __init
AddE820Regions(void)
{
    uint64 k;

    FixupE820();

    for (k = 0; k < e820_entries; k++) {
        MemRegionType t = MEMTYPE_HOLE;

        switch (e820_map[k].type) {
        case E820_RAM:
            t = MEMTYPE_RAM;
            break;
        case E820_RSVD:
            t = MEMTYPE_ROM;
            break;
        case E820_ACPI_DATA:
            t = MEMTYPE_ACPI_DATA;
            break;
        case E820_ACPI_NVS:
            t = MEMTYPE_ACPI_NVS;
            break;
        }

        AddMemoryRegion((PA)e820_map[k].base,
                        (PA)(e820_map[k].base + e820_map[k].length),
                        t);
    }
}
extern uint64 GetCR3(void);
asm(".global GetCR3\n"
    "GetCR3:\n"
    "\tmovq %cr3, %rax\n"
    "\tret\n");

extern uint64 GetCR2(void);
asm(".global GetCR2\n"
    "GetCR2:\n"
    "\tmovq %cr2, %rax\n"
    "\tret\n");

extern void SetCR3(uint64);
asm(".global SetCR3\n"
    "SetCR3:\n"
    "\tmovq %rdi, %cr3\n"
    "\tret\n");

extern void SetCR2(uint64);
asm(".global SetCR2\n"
    "SetCR2:\n"
    "\tmovq %rdi, %cr2\n"
    "\tret\n");

extern void FlushCR3(void);
asm(".global FlushCR3\n"
    "FlushCR3:\n"
    "\tmovq %cr3, %rax\n"
    "\tmovq %rax, %cr3\n"
    "\tret\n");
/* global flag that is TRUE when swapping is enabled */
volatile Bool swapping = FALSE;

// XXX: for now
#define PAGED(e) FALSE
/*
 * WalkPT --
 *
 *      Walk the page tables for a given VA and return a pointer to the
 *      PTE. If no entry exists for the virtual address in any of the
 *      tables, the entry is created.
 */

PTE *
WalkPT(VA addr)
{
    PML4E *pml4, *pml4e;
    PDPE *pdp, *pdpe;
    PDE *pd, *pde;
    PTE *pt, *pte;

    /* PML4 => PDP */
    pml4 = (PML4E *)(GetCR3() & ~0xfffULL);
    pml4e = &pml4[((addr >> 39) & 0x1ff)];

    if (*pml4e & PML4E_P) {
        pdp = (PDPE *)(*pml4e & PML4E_ADDR_MASK);
    } else {
        /* PDP is swapped out or not yet allocated. */
        if (swapping && PAGED(*pml4e)) {
            /* Swap it in. */
        } else {
            /* Allocate new PDP */
            pdp = (PDPE *)PageAlloc(MM_VA_IDENT, 0);
            bzero(pdp, PAGE_SIZE);
            *pml4e = MKPML4E((PA)pdp, PML4E_P);
        }
    }

    /* PDP => PD */
    pdpe = &pdp[((addr >> 30) & 0x1ff)];

    if (*pdpe & PDPE_P) {
        pd = (PDE *)(*pdpe & PDPE_ADDR_MASK);
    } else {
        /* PD is either paged out or not yet allocated. */
        if (swapping && PAGED(*pdpe)) {
            /* Swap it in. */
        } else {
            /* Allocate new PD */
            pd = (PDE *)PageAlloc(MM_VA_IDENT, 0);
            bzero(pd, PAGE_SIZE);
            *pdpe = MKPDPE((PA)pd, PDPE_P);
        }
    }

    /* PD => PT */
    pde = &pd[((addr >> 21) & 0x1ff)];

    if (*pde & PDE_P) {
        pt = (PTE *)(*pde & PDE_ADDR_MASK);
    } else {
        /* PT is either paged out or not yet allocated. */
        if (swapping && PAGED(*pde)) {
            /* Swap it in */
        } else {
            /* Allocate new PT */
            pt = (PTE *)PageAlloc(MM_VA_IDENT, 0);
            bzero(pt, PAGE_SIZE);
            *pde = MKPDE((PA)pt, PDE_P);
        }
    }

    pte = &pt[((addr >> 12) & 0x1ff)];

    return pte;
}
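
/*
 * Usage sketch (illustrative; assumes 'va' and 'pa' are page aligned):
 * map one page at 'va' to physical page 'pa' by walking to its PTE,
 * filling it in, then flushing the TLB.
 *
 *     PTE *pte = WalkPT(va);
 *     *pte = MKPTE(pa, PTE_P | PTE_RW | MM_PAGE_USED);
 *     FlushCR3();
 */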
extern uint64 GetCPL(void);
asm(".global GetCPL\n"
    "GetCPL:\n"
    "\tmovl %cs, %eax\n"
    "\tandl $3, %eax\n"
    "\tmovzwq %ax, %rax\n" /* may not be necessary */
    "\tret\n");
/*
 * GetFreePA --
 *
 *      Find a free physical page from the pool and return its
 *      address. Once swapping is turned on, this function will
 *      always return a free page.
 */

PA
GetFreePA(MemoryPool pool)
{
    PA freePage = MM_PA_INVALID;
    uint64 byte, bit;
    uint64 pageOffset = -1ULL;
    uint64 r;

    /*
     * Find a 0 bit in the bitmask and convert it to a page offset.
     */

    for (byte = 0; byte < BITMAP_SIZE(pools[pool].count); byte++) {
        if (pools[pool].map[byte] != 0xff) {

            for (bit = 0; bit < 8; bit++) {
                if (pools[pool].map[byte] & (1 << bit)) {
                    continue;
                }

                break;
            }

            pageOffset = byte * 8 + bit;
            pools[pool].map[byte] |= (1 << bit);

            break;
        }
    }

    if (pageOffset == -1ULL) {
        // XXX: do some paging instead of just shutting down
        asm("cli; hlt");
    }

    /*
     * Now scan through the regions, finding where this page is in memory.
     */

    for (r = 0; r < memoryRegionCount; r++) {
        if (memoryRegions[r].type == MEMTYPE_RAM &&
            pools[pool].base >= memoryRegions[r].base &&
            pools[pool].base < memoryRegions[r].limit) {

            if (memoryRegions[r].base + (pageOffset << 12) <
                memoryRegions[r].limit) {

                freePage = memoryRegions[r].base + (pageOffset << 12);
                break;
            } else {
                pageOffset -= (memoryRegions[r].limit -
                               memoryRegions[r].base) >> 12;
            }
        }
    }

    /* tval = pageOffset; tval2 = freePage; asm("\t.global test\ntest:\n"); */

    return freePage;
}
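
/*
 * Bitmap arithmetic sketch (matches the marking done in CreateMemPools
 * below): page offset N within a pool lives at bit (N & 7) of byte
 * (N >> 3) in that pool's map, so marking it used is:
 *
 *     pools[pool].map[N >> 3] |= (uint8)(1 << (N & 7));
 */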
void
ReleasePA(PA addr)
{
    /* XXX */
}
void
AdjustVPF(VA *pDesired, MemoryPool *pPool, uint64 *pFlags)
{
    VA desired;
    MemoryPool pool;
    uint64 flags;

    desired = *pDesired;
    pool = *pPool;
    flags = *pFlags;

    switch (GetCPL()) {
    case 0:
        pool = POOL_KERNEL;

        if (desired == MM_VA_DONT_CARE) {
            desired = MM_VA_KERNEL_START;
        } else if (desired == MM_VA_HEAP) {
            desired = MM_VA_KERNEL_HEAP;
        } else if (desired == MM_VA_IDENT) {
            desired = MM_VA_LOADER_START;
            pool = POOL_IDENT;
        }
        break;
    case 1:
    case 2:
        if (desired == MM_VA_DONT_CARE) {
            desired = MM_VA_PRIV_START;
        } else if (desired == MM_VA_HEAP) {
            desired = MM_VA_PRIV_HEAP;
        }
        pool = POOL_PRIVILEGED;
        break;
    case 3:
        if (desired == MM_VA_DONT_CARE) {
            desired = MM_VA_USER_START;
        } else if (desired == MM_VA_HEAP) {
            desired = MM_VA_USER_HEAP;
        } else if (desired < MM_VA_USER_START) {
            /* clamp user requests into the user range */
            desired = MM_VA_USER_START;
        }
        pool = POOL_USER;
        flags |= PTE_US;
        break;
    }

    desired &= PTE_ADDR_MASK; /* align to page boundary */

    *pDesired = desired;
    *pPool = pool;
    *pFlags = flags;
}
/*
 * PageAlloc --
 *
 *      Allocate a single page of memory and a physical page to back
 *      it. If a virtual address is requested, the allocator attempts
 *      to map there. If MM_VA_DONT_CARE is passed in instead, the
 *      allocator will map at the first available address.
 */

VA
PageAlloc(VA desired, uint64 flags)
{
    PA freePage;
    PTE *pte = NULL;
    VA va = MM_VA_INVALID;
    MemoryPool pool = POOL_USER;

    AdjustVPF(&desired, &pool, &flags);

    freePage = GetFreePA(pool);

    if (pool == POOL_IDENT) {
        pte = WalkPT((VA)freePage);
        ASSERT((*pte & MM_STATUS_MASK) == 0);

        va = (VA)freePage;
    } else {
        VA search = desired;

        /*
         * Scan for an unused VA. If MM_VA_DONT_CARE was passed in,
         * this may be slow...
         */

        pte = WalkPT(desired);

        while (*pte & MM_STATUS_MASK) {
            search += PAGE_SIZE;
            pte = WalkPT(search);
        }

        va = search;
    }

    /* Update the PTE to point to freePage */
    *pte = MKPTE(freePage, flags | MM_PAGE_USED | PTE_P);

    return va;
}
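
/*
 * Usage sketch: the page-table code above allocates identity-mapped
 * pages with PageAlloc(MM_VA_IDENT, 0). A writable kernel-heap page
 * would look like this (illustrative flags only):
 *
 *     VA page = PageAlloc(MM_VA_HEAP, PTE_RW);
 */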
/*
 * RegionAlloc --
 *
 *      Allocate a contiguous region of virtual memory. Pages cannot
 *      be allocated from POOL_IDENT here.
 *
 *      Returns NULL if a contiguous region cannot be found.
 *
 *      This is intended to be the general-purpose memory allocator.
 */

VA
RegionAlloc(VA desired, uint64 nPages, uint64 flags)
{
    MemoryPool pool = POOL_USER;
    VA found = MM_VA_INVALID, start, scan, limit;
    PTE *pte;
    uint64 n;

    AdjustVPF(&desired, &pool, &flags);

    ASSERT(pool != POOL_IDENT);

    if (desired < MM_VA_PRIV_START) {
        limit = MM_VA_PRIV_START;
    } else if (desired < MM_VA_USER_START) {
        limit = MM_VA_USER_START;
    } else {
        limit = MM_VA_CANONICAL_TOP;
    }

    /* Need to find an nPage region in virtual space that is available. */

    for (start = desired; start < limit; start += PAGE_SIZE) {
        pte = WalkPT(start);

        if (*pte & MM_STATUS_MASK) {
            continue;
        }

        for (scan = start + PAGE_SIZE, n = 0;
             n < nPages && scan < limit;
             n++, scan += PAGE_SIZE) {

            pte = WalkPT(scan);

            if (*pte & MM_STATUS_MASK) {
                break;
            }
        }

        if (n == nPages) {
            found = start;
            break;
        }
    }

    if (found == MM_VA_INVALID) {
        return NULL;
    }

    for (scan = found, n = 0; n < nPages; n++, scan += PAGE_SIZE) {
        pte = WalkPT(scan);
        *pte = MKPTE(0, flags | MM_PAGE_USED); /* Physmem allocation is lazy. */
    }

    return found;
}
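
/*
 * Usage sketch (illustrative): reserve a 16-page buffer anywhere in the
 * caller's range. No physical pages are attached here; HandlePF() below
 * fills them in on first touch because the PTEs are created without
 * PTE_P.
 *
 *     VA buf = RegionAlloc(MM_VA_DONT_CARE, 16, PTE_RW);  // NULL on failure
 */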
/*
 * PageFree --
 *
 *      Release a page of memory and its virtual mapping.
 */

void
PageFree(VA page)
{
    PTE *pte;
    PA physPage;

    pte = WalkPT(page);
    physPage = *pte & PTE_ADDR_MASK;

    *pte = 0;
    ReleasePA(physPage);
}
/*
 * PageRemap --
 *
 *      Remap a page's virtual address. Return TRUE if successful,
 *      and FALSE if the target VA is already in use or is outside the
 *      allowable range.
 */

Bool
PageRemap(VA current, VA new)
{
    /* XXX: unimplemented */
    return FALSE;
}
#define PF_NP   0x01
#define PF_RW   0x02
#define PF_US   0x04
#define PF_RSVD 0x08
#define PF_ID   0x10
/*
 * HandlePF --
 *
 *      Page fault handler (int 14). Called from stub in interrupts.S.
 */

void
HandlePF(ExcFrame *f)
{
    VA CR2;
    PA freePage;
    PTE *pte;
    MemoryPool pool = POOL_USER;

    CR2 = GetCR2();
    pte = WalkPT(CR2);

    if (f->errorCode & PF_NP) {
        /* #PF caused by permissions will be handled once tasks are
           implemented. */
        UNIMPLEMENTED("#PF");
    }

    /*
     * #PF caused by mapped but not allocated page - allocate it here.
     */

    switch (f->cs & 0x3) {
    case 0:
        pool = POOL_KERNEL;
        break;
    case 1:
    case 2:
        pool = POOL_PRIVILEGED;
        break;
    case 3:
        pool = POOL_USER;
        break;
    }

    /* XXX: Once swapping is implemented, will need to switch to kernel
     * stack and make this a deferred function call as getting a free
     * PA may take time and require interrupts to be enabled. */

    freePage = GetFreePA(pool);
    *pte = MKPTE(freePage, (*pte & 0xfff) | PTE_P);

    SetCR2(0);
}
/*
 * MapFirstPT --
 *
 *      Identity map the first page table. This is called very early
 *      in startup and is needed by the memory mapper.
 */

void __init
MapFirstPT(void)
{
    PA current;
    PTE *pt = (PTE *)PTBASE;

    /*
     * PML4/PDPT/PD are already initialized.
     */

    /* First page is BIOS data area - mark ro */
    pt[PTOFFSET(0)] = MKPTE(0, PTE_P | MM_PAGE_INVALID);

    /*
     * Below STACKTOP (0x18000), all pages are used by the loader.
     * Mark them as such.
     */

    for (current = PAGE_SIZE;
         current < PTE_ENTRIES * PAGE_SIZE;
         current += PAGE_SIZE) {
        MemRegionType type;
        PTE pte;

        type = GetMemType(current);

        if (type == MEMTYPE_RAM) {
            pte = MKPTE(current, PTE_P|PTE_RW);

            if (current < STACKTOP) {
                pte |= MM_PAGE_USED;
            }
        } else if (type == MEMTYPE_ROM) {
            pte = MKPTE(current, PTE_P | MM_PAGE_INVALID);
        } else if (type == MEMTYPE_IO ||
                   type == MEMTYPE_ACPI_DATA ||
                   type == MEMTYPE_ACPI_NVS) {
            pte = MKPTE(current, PTE_P|PTE_RW|PTE_PCD | MM_PAGE_INVALID);
        } else {
            pte = MKPTE(current, 0 | MM_PAGE_INVALID); /* mark page NP */
        }

        pt[PTOFFSET(current)] = pte;
    }
}
/*
 * MapIORegion --
 *
 *      Identity map an IO region.
 */

void
MapIORegion(PA start, PA end, char *name)
{
    PTE *pte;
    VA va;

    start &= PAGE_ADDR_MASK;
    end = (PAGE_SIZE - 1 + end) & PAGE_ADDR_MASK;

    for (va = start; va < end; va += PAGE_SIZE) {
        pte = WalkPT(va);
        *pte = MKPTE(va, MM_PAGE_INVALID|PTE_PCD|PTE_RW|PTE_P);
    }
}
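
/*
 * Usage sketch (illustrative address): identity map the local APIC's
 * default MMIO page before touching its registers.
 *
 *     MapIORegion(0xfee00000, 0xfee00000 + PAGE_SIZE, "lapic");
 */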
/*
 * AddRegionsToPools --
 *
 *      Go through memory regions and memory pools and assign base and
 *      limit addresses to each pool.
 */

void __init
AddRegionsToPools(void)
{
    uint64 r, p, countLeft;
    PA startAddr;

    r = p = 0;
    countLeft = pools[p].count * PAGE_SIZE;
    pools[p].base = startAddr = memoryRegions[0].base;

    while (r < memoryRegionCount && p < 4) {
        if (memoryRegions[r].type != MEMTYPE_RAM) {
            r++;
            continue;
        }

        if (startAddr < memoryRegions[r].base) {
            /* Update startAddr to the current region. */
            startAddr = memoryRegions[r].base;
        }

        if (countLeft == 0) {
            countLeft = pools[p].count * PAGE_SIZE;
            pools[p].base = startAddr;
        }

        if (startAddr + countLeft <= memoryRegions[r].limit) {
            startAddr += countLeft;
            countLeft = 0;
            pools[p].limit = startAddr; /* actually end address here */
            p++;
        } else if (startAddr + countLeft > memoryRegions[r].limit) {
            countLeft -= memoryRegions[r].limit - startAddr;
            r++;
        }
    }
}
static uint64 __init
CalculateTotalMem(void)
{
    uint64 mem, r;

    for (mem = 0, r = 0; r < memoryRegionCount; r++) {
        if (memoryRegions[r].type == MEMTYPE_RAM) {
            mem += memoryRegions[r].limit - memoryRegions[r].base;
        }
    }

    return mem;
}
/*
 * CreateMemPools --
 *
 *      Divide available physical memory into pools.
 *
 *      Initial allocations are:
 *
 *          ident  -  8MB - used for page tables and other basic data structures
 *          kernel -  8MB - kernel text/data
 *          priv   - 16MB - privileged processes (drivers)
 *          user   - rest - user physmem allocation
 */

#define MB (1024ULL * 1024ULL)
#define GB (1024ULL * 1024ULL * 1024ULL)
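
/*
 * Worked example (assuming PAGE_SIZE == 4096): 8 * MB / PAGE_SIZE is
 * 2048 pages and 16 * MB / PAGE_SIZE is 4096 pages, both divisible by
 * 8 as required by the byte-granular bitmaps below.
 */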
void __init
CreateMemPools(void)
{
    uint64 totalMem, totalPages, bitmapPages, p;
    PA addr;

    /* the number of pages in each pool must be divisible by 8 */
    pools[0].name = poolNames[0];
    pools[0].count = 8 * MB / PAGE_SIZE;
    pools[1].name = poolNames[1];
    pools[1].count = 8 * MB / PAGE_SIZE;
    pools[2].name = poolNames[2];
    pools[2].count = 16 * MB / PAGE_SIZE;
    pools[3].name = poolNames[3];

    /*
     * Each page of bitmask can represent 32768 pages (128MB). As the
     * range 0x18000 - 0x98000 (0x80000 bytes, i.e. 128 bitmap pages) is
     * used for bitmasks, this allows a maximum of 128 * 32768 = 4194304
     * pages, or 16GB of physical memory. Should this limit become
     * onerous, it should be pretty easy to revisit.
     */
    totalMem = CalculateTotalMem();

    if (totalMem > 16 * GB) {
        totalMem = 16 * GB;
    }

    if (totalMem < 32 * MB) {
        // XXX: PANIC("Not enough memory");
        asm("cli; hlt\n");
    } else if (totalMem < 64 * MB) {
        /* Small mem - halve pool allocations. */
        pools[0].count = 4 * MB / PAGE_SIZE;
        pools[1].count = 4 * MB / PAGE_SIZE;
        pools[2].count = 8 * MB / PAGE_SIZE;
    }

    pools[3].count = (totalMem / PAGE_SIZE - pools[0].count -
                      pools[1].count - pools[2].count);

    totalPages = (pools[0].count + pools[1].count +
                  pools[2].count + pools[3].count);

    /* round up to next full bitmap page */
    if (totalPages & 0x7fff) {
        totalPages = (totalPages & 0xffffffffffff8000) + 0x8000;
    }

    bitmapPages = totalPages >> 15; /* div 32768 */

    //tval = bitmapPages; asm("\t.global test\ntest:\n");
    pools[0].map = (uint8 *)PHYSMEM_BITMAP_BASE;
    pools[1].map = (uint8 *)((uint64)pools[0].map + BITMAP_SIZE(pools[0].count));
    pools[2].map = (uint8 *)((uint64)pools[1].map + BITMAP_SIZE(pools[1].count));
    pools[3].map = (uint8 *)((uint64)pools[2].map + BITMAP_SIZE(pools[2].count));

    AddRegionsToPools();

    /*
     * Finally mark known pages as used in the ident bitmap.
     */

    /* zero out bitmaps */
    bzero(pools[0].map, BITMAP_SIZE(pools[0].count));
    bzero(pools[1].map, BITMAP_SIZE(pools[1].count));
    bzero(pools[2].map, BITMAP_SIZE(pools[2].count));
    bzero(pools[3].map, BITMAP_SIZE(pools[3].count));

    /*
     * Used pages are:
     *    0       - 0x1000  : BDA
     *    0x1000  - 0x6000  : GDT/IDT/PML4/PDPT/PD/PT
     *    0x6000  - 0x8000  : loader bss (can be freed later)
     *    0x8000  - 0x10000 : loader text/data/rodata
     *    0x10000 - 0x18000 : stack
     *    0x18000 - ?       : bitmaps
     */

    for (addr = 0; addr < STACKTOP; addr += PAGE_SIZE) {
        uint64 ppn, byte, bit;

        ppn = addr >> 12;

        byte = ppn >> 3;
        bit = ppn & 7;

        pools[0].map[byte] |= (uint8)(1 << bit);
    }

    for (p = 0; p < bitmapPages; p++) {
        uint64 addr, ppn, byte, bit;

        addr = (PA)PHYSMEM_BITMAP_BASE + p * PAGE_SIZE;
        ppn = addr >> 12;

        byte = ppn >> 3;
        bit = ppn & 7;

        pools[0].map[byte] |= (uint8)(1 << bit);
    }
}
void __init
MapMemory(void)
{
    /*
     * Add known memory regions to list.
     */

    memoryRegionCount = 0;
    AddE820Regions();

    /*
     * Fill in the first page table - that will provide some breathing
     * room to set up all the various data structures. As part of
     * this, clobber the original page tables (though since this region
     * will be identity mapped, it won't make a difference).
     */

    MapFirstPT();
    FlushCR3();

    asm("\t.global mapped\nmapped:\n");

    /* Now there's room for the rest of the page tables. */

    CreateMemPools();
}