enhanced backtrace output
[mit-jos.git] / kern / pmap.c
blob37c187ad0298154955e2a684b5c35b9c4c7fe484
1 /* See COPYRIGHT for copyright information. */
3 #include <inc/x86.h>
4 #include <inc/mmu.h>
5 #include <inc/error.h>
6 #include <inc/string.h>
7 #include <inc/assert.h>
9 #include <kern/pmap.h>
10 #include <kern/kclock.h>
// These variables are set by i386_detect_memory()
static physaddr_t maxpa;	// Maximum physical address
size_t npage;			// Amount of physical memory (in pages)
static size_t basemem;		// Amount of base memory (in bytes)
static size_t extmem;		// Amount of extended memory (in bytes)

// These variables are set in i386_vm_init()
pde_t* boot_pgdir;		// Virtual address of boot time page directory
physaddr_t boot_cr3;		// Physical address of boot time page directory
static char* boot_freemem;	// Pointer to next byte of free mem (used by boot_alloc)

struct Page* pages;		// Virtual address of physical page array
static struct Page_list page_free_list;	// Free list of physical pages
// Global descriptor table.
//
// The kernel and user segments are identical (except for the DPL).
// To load the SS register, the CPL must equal the DPL.  Thus,
// we must duplicate the segments for the user and the kernel.
//
struct Segdesc gdt[] =
{
	// 0x0 - unused (always faults -- for trapping NULL far pointers)
	SEG_NULL,

	// 0x8 - kernel code segment
	[GD_KT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),

	// 0x10 - kernel data segment
	[GD_KD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),

	// 0x18 - user code segment
	[GD_UT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),

	// 0x20 - user data segment
	[GD_UD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),

	// 0x28 - tss, initialized in idt_init()
	[GD_TSS >> 3] = SEG_NULL
};

// Pseudo-descriptor for the LGDT instruction: 16-bit limit
// (table size minus one) followed by the table's linear address.
struct Pseudodesc gdt_pd = {
	sizeof(gdt) - 1, (unsigned long) gdt
};
// Read a 16-bit little-endian quantity stored in two consecutive
// 8-bit CMOS/NVRAM registers starting at register 'r'.
static int
nvram_read(int r)
{
	int lo = mc146818_read(r);
	int hi = mc146818_read(r + 1);
	return lo | (hi << 8);
}
// Query the CMOS/NVRAM for the sizes of base and extended memory and
// derive maxpa (highest usable physical address) and npage (total
// number of physical pages) from them.
void
i386_detect_memory(void)
{
	// CMOS tells us how many kilobytes there are
	basemem = ROUNDDOWN(nvram_read(NVRAM_BASELO)*1024, PGSIZE);
	extmem = ROUNDDOWN(nvram_read(NVRAM_EXTLO)*1024, PGSIZE);

	// Calculate the maximum physical address based on whether
	// or not there is any extended memory.  See comment in <inc/mmu.h>.
	// Extended memory starts at EXTPHYSMEM, so maxpa is its end.
	if (extmem)
		maxpa = EXTPHYSMEM + extmem;
	else
		maxpa = basemem;

	npage = maxpa / PGSIZE;

	cprintf("Physical memory: %dK available, ", (int)(maxpa/1024));
	cprintf("base = %dK, extended = %dK\n", (int)(basemem/1024), (int)(extmem/1024));
}
83 // --------------------------------------------------------------
84 // Set up initial memory mappings and turn on MMU.
85 // --------------------------------------------------------------
87 static void check_boot_pgdir(void);
88 static void check_page_alloc();
89 static void page_check(void);
90 static void boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm);
93 // A simple physical memory allocator, used only a few times
94 // in the process of setting up the virtual memory system.
95 // page_alloc() is the real allocator.
97 // Allocate n bytes of physical memory aligned on an
98 // align-byte boundary. Align must be a power of two.
99 // Return kernel virtual address. Returned memory is uninitialized.
101 // If we're out of memory, boot_alloc should panic.
102 // This function may ONLY be used during initialization,
103 // before the page_free_list has been set up.
105 static void*
106 boot_alloc(uint32_t n, uint32_t align)
108 extern char end[];
109 void *v;
111 // Initialize boot_freemem if this is the first time.
112 // 'end' is a magic symbol automatically generated by the linker,
113 // which points to the end of the kernel's bss segment -
114 // i.e., the first virtual address that the linker
115 // did _not_ assign to any kernel code or global variables.
116 if (boot_freemem == 0)
117 boot_freemem = end;
119 // LAB 2: Your code here:
120 // Step 1: round boot_freemem up to be aligned properly
121 // Step 2: save current value of boot_freemem as allocated chunk
122 // Step 3: increase boot_freemem to record allocation
123 // Step 4: return allocated chunk
125 // if align is zero, I just bypass the test
126 // i.e. I think zero as a power of two, which you may not agree with.
127 if (align) {
128 if (align & (align - 1))
129 panic("boot_alloc : align is not a power of two.\n");
130 boot_freemem = ROUNDUP(boot_freemem, align);
133 v = (void *)boot_freemem;
134 boot_freemem += n;
136 if ((unsigned int)boot_freemem < (unsigned int)end)
137 panic("boot_alloc: out of memory.\n");
138 else
139 return v;
// Set up a two-level page table:
//    boot_pgdir is its linear (virtual) address of the root
//    boot_cr3 is the physical address of the root
// Then turn on paging.  Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be setup later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read (or write).
void
i386_vm_init(void)
{
	pde_t* pgdir;
	uint32_t cr0;
	size_t n;

	//////////////////////////////////////////////////////////////////////
	// create initial page directory (one page, page-aligned, zeroed so
	// every PDE starts out not-present).
	pgdir = boot_alloc(PGSIZE, PGSIZE);
	memset(pgdir, 0, PGSIZE);
	boot_pgdir = pgdir;
	boot_cr3 = PADDR(pgdir);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address VPT.
	// (For now, you don't have to understand the greater purpose of the
	// following two lines.)
	//
	// Permissions: kernel RW, user NONE
	pgdir[PDX(VPT)] = PADDR(pgdir)|PTE_W|PTE_P;

	// same for UVPT
	// Permissions: kernel R, user R
	pgdir[PDX(UVPT)] = PADDR(pgdir)|PTE_U|PTE_P;

	//////////////////////////////////////////////////////////////////////
	// Make 'pages' point to an array of size 'npage' of 'struct Page'.
	// The kernel uses this structure to keep track of physical pages;
	// 'npage' equals the number of physical pages in memory.  User-level
	// programs will get read-only access to the array as well.
	n = ROUNDUP((sizeof(struct Page) * npage), PGSIZE);
	pages = boot_alloc(n, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages.  Once we've done so, all further
	// memory management will go through the page_* functions.  In
	// particular, we can now map memory using boot_map_segment or page_insert
	page_init();

	check_page_alloc();

	page_check();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// (ie. perm = PTE_U | PTE_P)
	// Permissions:
	//    - pages -- kernel RW, user NONE
	//    - the read-only version mapped at UPAGES -- kernel R, user R
	// LAB 2: not implemented yet; check_boot_pgdir() below asserts
	// this mapping exists, so it will fail until this is filled in.
	// Your code goes here:

	//////////////////////////////////////////////////////////////////////
	// Map the kernel stack (symbol name "bootstack").  The complete VA
	// range of the stack, [KSTACKTOP-PTSIZE, KSTACKTOP), breaks into two
	// pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed => faults
	// Permissions: kernel RW, user NONE
	// Your code goes here:

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:

	// Check that the initial page directory has been set up correctly.
	check_boot_pgdir();

	//////////////////////////////////////////////////////////////////////
	// On x86, segmentation maps a VA to a LA (linear addr) and
	// paging maps the LA to a PA.  I.e. VA => LA => PA.  If paging is
	// turned off the LA is used as the PA.  Note: there is no way to
	// turn off segmentation.  The closest thing is to set the base
	// address to 0, so the VA => LA mapping is the identity.

	// Current mapping: VA KERNBASE+x => PA x.
	//     (segmentation base=-KERNBASE and paging is off)

	// From here on down we must maintain this VA KERNBASE + x => PA x
	// mapping, even though we are turning on paging and reconfiguring
	// segmentation.

	// Map VA 0:4MB same as VA KERNBASE, i.e. to PA 0:4MB.
	// (Limits our kernel to <4MB)
	pgdir[0] = pgdir[PDX(KERNBASE)];

	// Install page table.
	lcr3(boot_cr3);

	// Turn on paging (CR0_PG) together with protection/alignment bits;
	// TS and EM are set then immediately cleared, leaving them off.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_TS|CR0_EM|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Current mapping: KERNBASE+x => x => x.
	// (x < 4MB so uses paging pgdir[0])

	// Reload all segment registers so they use the new flat GDT.
	asm volatile("lgdt gdt_pd");
	asm volatile("movw %%ax,%%gs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%fs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%es" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ds" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ss" :: "a" (GD_KD));
	asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KT));  // reload cs
	asm volatile("lldt %%ax" :: "a" (0));

	// Final mapping: KERNBASE+x => KERNBASE+x => x.

	// This mapping was only used after paging was turned on but
	// before the segment registers were reloaded.
	pgdir[0] = 0;

	// Flush the TLB for good measure, to kill the pgdir[0] mapping.
	lcr3(boot_cr3);
}
287 // Check the physical page allocator (page_alloc(), page_free(),
288 // and page_init()).
290 static void
291 check_page_alloc()
293 struct Page *pp, *pp0, *pp1, *pp2;
294 struct Page_list fl;
296 // if there's a page that shouldn't be on
297 // the free list, try to make sure it
298 // eventually causes trouble.
299 LIST_FOREACH(pp0, &page_free_list, pp_link)
300 memset(page2kva(pp0), 0x97, 128);
302 // should be able to allocate three pages
303 pp0 = pp1 = pp2 = 0;
304 assert(page_alloc(&pp0) == 0);
305 assert(page_alloc(&pp1) == 0);
306 assert(page_alloc(&pp2) == 0);
308 assert(pp0);
309 assert(pp1 && pp1 != pp0);
310 assert(pp2 && pp2 != pp1 && pp2 != pp0);
311 assert(page2pa(pp0) < npage*PGSIZE);
312 assert(page2pa(pp1) < npage*PGSIZE);
313 assert(page2pa(pp2) < npage*PGSIZE);
315 // temporarily steal the rest of the free pages
316 fl = page_free_list;
317 LIST_INIT(&page_free_list);
319 // should be no free memory
320 assert(page_alloc(&pp) == -E_NO_MEM);
322 // free and re-allocate?
323 page_free(pp0);
324 page_free(pp1);
325 page_free(pp2);
327 pp0 = pp1 = pp2 = 0;
328 assert(page_alloc(&pp0) == 0);
329 assert(page_alloc(&pp1) == 0);
330 assert(page_alloc(&pp2) == 0);
331 assert(pp0);
332 assert(pp1 && pp1 != pp0);
333 assert(pp2 && pp2 != pp1 && pp2 != pp0);
334 assert(page_alloc(&pp) == -E_NO_MEM);
336 // give free list back
337 page_free_list = fl;
339 // free the pages we took
340 page_free(pp0);
341 page_free(pp1);
342 page_free(pp2);
344 cprintf("check_page_alloc() succeeded!\n");
348 // Checks that the kernel part of virtual address space
349 // has been setup roughly correctly(by i386_vm_init()).
351 // This function doesn't test every corner case,
352 // in fact it doesn't test the permission bits at all,
353 // but it is a pretty good sanity check.
355 static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
357 static void
358 check_boot_pgdir(void)
360 uint32_t i, n;
361 pde_t *pgdir;
363 pgdir = boot_pgdir;
365 // check pages array
366 n = ROUNDUP(npage*sizeof(struct Page), PGSIZE);
367 for (i = 0; i < n; i += PGSIZE)
368 assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);
371 // check phys mem
372 for (i = 0; i < npage; i += PGSIZE)
373 assert(check_va2pa(pgdir, KERNBASE + i) == i);
375 // check kernel stack
376 for (i = 0; i < KSTKSIZE; i += PGSIZE)
377 assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
379 // check for zero/non-zero in PDEs
380 for (i = 0; i < NPDENTRIES; i++) {
381 switch (i) {
382 case PDX(VPT):
383 case PDX(UVPT):
384 case PDX(KSTACKTOP-1):
385 case PDX(UPAGES):
386 assert(pgdir[i]);
387 break;
388 default:
389 if (i >= PDX(KERNBASE))
390 assert(pgdir[i]);
391 else
392 assert(pgdir[i] == 0);
393 break;
396 cprintf("check_boot_pgdir() succeeded!\n");
399 // This function returns the physical address of the page containing 'va',
400 // defined by the page directory 'pgdir'. The hardware normally performs
401 // this functionality for us! We define our own version to help check
402 // the check_boot_pgdir() function; it shouldn't be used elsewhere.
404 static physaddr_t
405 check_va2pa(pde_t *pgdir, uintptr_t va)
407 pte_t *p;
409 pgdir = &pgdir[PDX(va)];
410 if (!(*pgdir & PTE_P))
411 return ~0;
412 p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
413 if (!(p[PTX(va)] & PTE_P))
414 return ~0;
415 return PTE_ADDR(p[PTX(va)]);
// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct Page' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

// Initialize page structure and memory free list.
// After this point, ONLY use the functions below
// to allocate and deallocate physical memory via the page_free_list,
// and NEVER use boot_alloc()
void
page_init(void)
{
	// The example code here marks all pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) Mark the rest of base memory as free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM).
	//     Mark it as in use so that it can never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free.  Where is the kernel?
	//     Which pages are used for page tables and other data structures?
	struct Page *ppage;
	unsigned int i;
	extern char _start[];

	// Start by putting every physical page on the free list ...
	LIST_INIT(&page_free_list);
	for (i = 0; i < npage; i++) {
		pages[i].pp_ref = 0;
		LIST_INSERT_HEAD(&page_free_list, &pages[i], pp_link);
	}

	// ... then pull the reserved ranges back off it.

	// mark page 0 as in use
	LIST_REMOVE(&pages[0], pp_link);

	// IO hole
	for (i = IOPHYSMEM; i < EXTPHYSMEM; i += PGSIZE) {
		ppage = pa2page(i);
		LIST_REMOVE(ppage, pp_link);
	}

	// Reserve the kernel image and everything boot_alloc() has handed
	// out so far: [_start, boot_freemem).
	// NOTE(review): this assumes _start is a page-aligned kernel
	// *virtual* address (PADDR() is applied to it) -- confirm against
	// kern/entry.S, where _start is defined.
	for (i = (unsigned int)_start; i < (unsigned int)boot_freemem; i += PGSIZE) {
		ppage = pa2page(PADDR(i));
		LIST_REMOVE(ppage, pp_link);
	}
}
473 // Initialize a Page structure.
474 // The result has null links and 0 refcount.
475 // Note that the corresponding physical page is NOT initialized!
477 static void
478 page_initpp(struct Page *pp)
480 memset(pp, 0, sizeof(*pp));
484 // Allocates a physical page.
485 // Does NOT set the contents of the physical page to zero -
486 // the caller must do that if necessary.
488 // *pp_store -- is set to point to the Page struct of the newly allocated
489 // page
491 // RETURNS
492 // 0 -- on success
493 // -E_NO_MEM -- otherwise
495 // Hint: use LIST_FIRST, LIST_REMOVE, and page_initpp
496 // Hint: pp_ref should not be incremented
498 page_alloc(struct Page **pp_store)
500 // Fill this function in
501 struct Page *page_avail;
503 if (!LIST_EMPTY(&page_free_list)) {
504 page_avail = LIST_FIRST(&page_free_list);
505 LIST_REMOVE(page_avail, pp_link);
506 page_initpp(page_avail);
508 *pp_store = page_avail;
509 return 0;
511 return -E_NO_MEM;
515 // Return a page to the free list.
516 // (This function should only be called when pp->pp_ref reaches 0.)
518 void
519 page_free(struct Page *pp)
521 // Fill this function in
522 if (pp->pp_ref)
523 panic("page_free: pp->pp_ref is not zero.\n");
525 LIST_INSERT_HEAD(&page_free_list, pp, pp_link);
529 // Decrement the reference count on a page,
530 // freeing it if there are no more refs.
532 void
533 page_decref(struct Page* pp)
535 if (--pp->pp_ref == 0)
536 page_free(pp);
539 // Given 'pgdir', a pointer to a page directory, pgdir_walk returns
540 // a pointer to the page table entry (PTE) for linear address 'va'.
541 // This requires walking the two-level page table structure.
543 // If the relevant page table doesn't exist in the page directory, then:
544 // - If create == 0, pgdir_walk returns NULL.
545 // - Otherwise, pgdir_walk tries to allocate a new page table
546 // with page_alloc. If this fails, pgdir_walk returns NULL.
547 // - pgdir_walk sets pp_ref to 1 for the new page table.
548 // - Finally, pgdir_walk returns a pointer into the new page table.
550 // Hint: you can turn a Page * into the physical address of the
551 // page it refers to with page2pa() from kern/pmap.h.
552 pte_t *
553 pgdir_walk(pde_t *pgdir, const void *va, int create)
555 // Fill this function in
556 return NULL;
560 // Map the physical page 'pp' at virtual address 'va'.
561 // The permissions (the low 12 bits) of the page table
562 // entry should be set to 'perm|PTE_P'.
564 // Details
565 // - If there is already a page mapped at 'va', it is page_remove()d.
566 // - If necessary, on demand, allocates a page table and inserts it into
567 // 'pgdir'.
568 // - pp->pp_ref should be incremented if the insertion succeeds.
569 // - The TLB must be invalidated if a page was formerly present at 'va'.
571 // RETURNS:
572 // 0 on success
573 // -E_NO_MEM, if page table couldn't be allocated
575 // Hint: The TA solution is implemented using pgdir_walk, page_remove,
576 // and page2pa.
579 page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm)
581 // Fill this function in
582 return 0;
586 // Map [la, la+size) of linear address space to physical [pa, pa+size)
587 // in the page table rooted at pgdir. Size is a multiple of PGSIZE.
588 // Use permission bits perm|PTE_P for the entries.
590 // This function is only intended to set up the ``static'' mappings
591 // above UTOP. As such, it should *not* change the pp_ref field on the
592 // mapped pages.
594 // Hint: the TA solution uses pgdir_walk
595 static void
596 boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm)
598 // Fill this function in
602 // Return the page mapped at virtual address 'va'.
603 // If pte_store is not zero, then we store in it the address
604 // of the pte for this page. This is used by page_remove
605 // but should not be used by other callers.
607 // Return 0 if there is no page mapped at va.
609 // Hint: the TA solution uses pgdir_walk and pa2page.
611 struct Page *
612 page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
614 // Fill this function in
615 return NULL;
619 // Unmaps the physical page at virtual address 'va'.
620 // If there is no physical page at that address, silently does nothing.
622 // Details:
623 // - The ref count on the physical page should decrement.
624 // - The physical page should be freed if the refcount reaches 0.
625 // - The pg table entry corresponding to 'va' should be set to 0.
626 // (if such a PTE exists)
627 // - The TLB must be invalidated if you remove an entry from
628 // the pg dir/pg table.
630 // Hint: The TA solution is implemented using page_lookup,
631 // tlb_invalidate, and page_decref.
633 void
634 page_remove(pde_t *pgdir, void *va)
636 // Fill this function in
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	// For now, there is only one address space, so always invalidate.
	// (The 'pgdir' parameter is therefore unused for the moment.)
	invlpg(va);
}
// check page_insert, page_remove, &c
// Exercises the page-table functions against boot_pgdir with the free
// list temporarily emptied, so every allocation is fully predictable.
static void
page_check(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page_list fl;
	pte_t *ptep, *ptep1;
	void *va;
	int i;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert(page_alloc(&pp0) == 0);
	assert(page_alloc(&pp1) == 0);
	assert(page_alloc(&pp2) == 0);

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	LIST_INIT(&page_free_list);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// there is no page allocated at address 0
	assert(page_lookup(boot_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) == 0);
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(boot_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(page_alloc(&pp) == -E_NO_MEM);

	// check that pgdir_walk returns a pointer to the pte
	ptep = KADDR(PTE_ADDR(boot_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(boot_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, PTE_U) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(boot_pgdir, (void*) PGSIZE, 0) & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(boot_pgdir, pp0, (void*) PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(boot_pgdir, pp1, (void*) PGSIZE, 0) == 0);

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(boot_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(boot_pgdir, 0x0);
	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(boot_pgdir, (void*) PGSIZE);
	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

#if 0
	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(boot_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(boot_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(boot_pgdir, 0x0);
	assert(pp2->pp_ref == 0);
#endif

	// forcibly take pp0 back
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	boot_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(boot_pgdir, va, 1);
	ptep1 = KADDR(PTE_ADDR(boot_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	boot_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(boot_pgdir, 0x0, 1);
	ptep = page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	boot_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("page_check() succeeded!\n");
}