/* See COPYRIGHT for copyright information. */

#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>

#include <kern/pmap.h>
#include <kern/kclock.h>
#include <kern/env.h>
// These variables are set by i386_detect_memory()
static physaddr_t maxpa;	// Maximum physical address
size_t npage;			// Amount of physical memory (in pages)
static size_t basemem;		// Amount of base memory (in bytes)
static size_t extmem;		// Amount of extended memory (in bytes)

// These variables are set in i386_vm_init()
pde_t* boot_pgdir;		// Virtual address of boot time page directory
physaddr_t boot_cr3;		// Physical address of boot time page directory
static char* boot_freemem;	// Pointer to next byte of free mem

struct Page* pages;		// Virtual address of physical page array
static struct Page_list page_free_list;	// Free list of physical pages
// Global descriptor table.
//
// The kernel and user segments are identical (except for the DPL).
// To load the SS register, the CPL must equal the DPL.  Thus,
// we must duplicate the segments for the user and the kernel.
//
struct Segdesc gdt[] =
{
	// 0x0 - unused (always faults -- for trapping NULL far pointers)
	SEG_NULL,

	// 0x8 - kernel code segment
	[GD_KT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),

	// 0x10 - kernel data segment
	[GD_KD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 0),

	// 0x18 - user code segment
	[GD_UT >> 3] = SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),

	// 0x20 - user data segment
	[GD_UD >> 3] = SEG(STA_W, 0x0, 0xffffffff, 3),

	// 0x28 - tss, initialized in idt_init()
	[GD_TSS >> 3] = SEG_NULL
};

struct Pseudodesc gdt_pd = {
	sizeof(gdt) - 1, (unsigned long) gdt
};
static int
nvram_read(int r)
{
	return mc146818_read(r) | (mc146818_read(r + 1) << 8);
}
void
i386_detect_memory(void)
{
	// CMOS tells us how many kilobytes there are
	basemem = ROUNDDOWN(nvram_read(NVRAM_BASELO)*1024, PGSIZE);
	extmem = ROUNDDOWN(nvram_read(NVRAM_EXTLO)*1024, PGSIZE);

	// Calculate the maximum physical address based on whether
	// or not there is any extended memory.  See comment in <inc/mmu.h>.
	if (extmem)
		maxpa = EXTPHYSMEM + extmem;
	else
		maxpa = basemem;

	npage = maxpa / PGSIZE;

	cprintf("Physical memory: %dK available, ", (int)(maxpa/1024));
	cprintf("base = %dK, extended = %dK\n", (int)(basemem/1024), (int)(extmem/1024));
}
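
// Worked example (illustrative numbers): if the CMOS reports 640K of
// base memory and 31744K of extended memory, then
// maxpa = EXTPHYSMEM + 31744*1024 = 1MB + 31MB = 32MB,
// so npage = 32MB / PGSIZE = 8192 physical pages.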
// --------------------------------------------------------------
// Set up initial memory mappings and turn on MMU.
// --------------------------------------------------------------

static void check_boot_pgdir(void);
static void check_page_alloc(void);
static void page_check(void);
static void boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm);
// A simple physical memory allocator, used only a few times
// in the process of setting up the virtual memory system.
// page_alloc() is the real allocator.
//
// Allocate n bytes of physical memory aligned on an
// align-byte boundary.  Align must be a power of two.
// Return kernel virtual address.  Returned memory is uninitialized.
//
// If we're out of memory, boot_alloc should panic.
// This function may ONLY be used during initialization,
// before the page_free_list has been set up.
static void*
boot_alloc(uint32_t n, uint32_t align)
{
	extern char end[];
	void *v;

	// Initialize boot_freemem if this is the first time.
	// 'end' is a magic symbol automatically generated by the linker,
	// which points to the end of the kernel's bss segment -
	// i.e., the first virtual address that the linker
	// did _not_ assign to any kernel code or global variables.
	if (boot_freemem == 0)
		boot_freemem = end;

	// LAB 2: Your code here:
	//	Step 1: round boot_freemem up to be aligned properly
	//	Step 2: save current value of boot_freemem as allocated chunk
	//	Step 3: increase boot_freemem to record allocation
	//	Step 4: return allocated chunk

	// If align is zero, I simply bypass the check, i.e. I treat zero
	// as a valid (trivial) alignment, which you may not agree with.
	if (align) {
		if (align & (align - 1))
			panic("boot_alloc: align is not a power of two.\n");
		boot_freemem = ROUNDUP(boot_freemem, align);
	}

	v = (void *) boot_freemem;
	boot_freemem += n;

	// If boot_freemem is now below 'end', the pointer wrapped around
	// the 32-bit address space: treat that as out of memory.
	if ((unsigned int) boot_freemem < (unsigned int) end)
		panic("boot_alloc: out of memory.\n");
	return v;
}
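
// Usage sketch: this is how i386_vm_init() below uses boot_alloc to
// carve out page-aligned kernel data structures before the page
// allocator exists:
//
//	pde_t *pgdir = boot_alloc(PGSIZE, PGSIZE);	// one aligned page
//	memset(pgdir, 0, PGSIZE);			// contents start uninitialized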
// Set up a two-level page table:
//    boot_pgdir is the linear (virtual) address of the root
//    boot_cr3 is the physical address of the root
// Then turn on paging.  Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read (or write).
void
i386_vm_init(void)
{
	pde_t* pgdir;
	uint32_t cr0;
	size_t page_size, env_size;

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	pgdir = boot_alloc(PGSIZE, PGSIZE);
	memset(pgdir, 0, PGSIZE);
	boot_pgdir = pgdir;
	boot_cr3 = PADDR(pgdir);
	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address VPT.
	// (For now, you don't have to understand the greater purpose of the
	// following two lines.)
	//
	// Permissions: kernel RW, user NONE
	pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_W | PTE_P;

	// same for UVPT
	// Permissions: kernel R, user R
	pgdir[PDX(UVPT)] = PADDR(pgdir) | PTE_U | PTE_P;
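
	// Example of what the self-map buys us (a sketch, assuming the
	// usual JOS declaration 'extern volatile pte_t vpt[];' from
	// kern/pmap.h): once paging is on, the PTE for any mapped va can
	// be read without walking the tree by hand:
	//
	//	pte_t pte = vpt[PPN(va)];	// MMU does the walk for us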
	//////////////////////////////////////////////////////////////////////
	// Make 'pages' point to an array of size 'npage' of 'struct Page'.
	// The kernel uses this structure to keep track of physical pages;
	// 'npage' equals the number of physical pages in memory.  User-level
	// programs will get read-only access to the array as well.
	// You must allocate the array yourself.
	// Your code goes here:
	page_size = ROUNDUP(sizeof(struct Page) * npage, PGSIZE);
	pages = boot_alloc(page_size, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.
	env_size = ROUNDUP(sizeof(struct Env) * NENV, PGSIZE);
	envs = boot_alloc(env_size, PGSIZE);
	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages.  Once we've done so, all further
	// memory management will go through the page_* functions.  In
	// particular, we can now map memory using boot_map_segment or page_insert.
	page_init();

	check_page_alloc();

	page_check();
	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory.

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// (ie. perm = PTE_U | PTE_P)
	// Permissions:
	//    - pages -- kernel RW, user NONE
	//    - the read-only version mapped at UPAGES -- kernel R, user R
	// Your code goes here:
	boot_map_segment(pgdir, UPAGES, page_size, PADDR(pages), PTE_U);

	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS
	// (ie. perm = PTE_U | PTE_P).
	// Permissions:
	//    - envs itself -- kernel RW, user NONE
	//    - the image of envs mapped at UENVS -- kernel R, user R
	boot_map_segment(pgdir, UENVS, env_size, PADDR(envs), PTE_U);

	//////////////////////////////////////////////////////////////////////
	// Map the kernel stack (symbol name "bootstack").  The complete VA
	// range of the stack, [KSTACKTOP-PTSIZE, KSTACKTOP), breaks into two
	// pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed => faults
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_segment(pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE, PADDR(bootstack), PTE_W);

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// I.e. the VA range [KERNBASE, 2^32) should map to
	// the PA range [0, 2^32 - KERNBASE).
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_segment(pgdir, KERNBASE, 0xffffffff - KERNBASE + 1, 0, PTE_W);
	// Check that the initial page directory has been set up correctly.
	check_boot_pgdir();

	//////////////////////////////////////////////////////////////////////
	// On x86, segmentation maps a VA to a LA (linear addr) and
	// paging maps the LA to a PA.  I.e. VA => LA => PA.  If paging is
	// turned off the LA is used as the PA.  Note: there is no way to
	// turn off segmentation.  The closest thing is to set the base
	// address to 0, so the VA => LA mapping is the identity.

	// Current mapping: VA KERNBASE+x => PA x.
	//     (segmentation base = -KERNBASE and paging is off)

	// From here on down we must maintain this VA KERNBASE + x => PA x
	// mapping, even though we are turning on paging and reconfiguring
	// segmentation.

	// Map VA 0:4MB same as VA KERNBASE, i.e. to PA 0:4MB.
	// (Limits our kernel to <4MB)
	pgdir[0] = pgdir[PDX(KERNBASE)];

	// Install page table.
	lcr3(boot_cr3);

	// Turn on paging.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_TS|CR0_EM|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Current mapping: KERNBASE+x => x => x.
	// (x < 4MB so uses paging pgdir[0])

	// Reload all segment registers.
	asm volatile("lgdt gdt_pd");
	asm volatile("movw %%ax,%%gs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%fs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%es" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ds" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ss" :: "a" (GD_KD));
	asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KT));  // reload cs
	asm volatile("lldt %%ax" :: "a" (0));

	// Final mapping: KERNBASE+x => KERNBASE+x => x.

	// This mapping was only used after paging was turned on but
	// before the segment registers were reloaded.
	pgdir[0] = 0;

	// Flush the TLB for good measure, to kill the pgdir[0] mapping.
	lcr3(boot_cr3);
}
//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page_list fl;

	// if there's a page that shouldn't be on
	// the free list, try to make sure it
	// eventually causes trouble.
	LIST_FOREACH(pp0, &page_free_list, pp_link)
		memset(page2kva(pp0), 0x97, 128);

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert(page_alloc(&pp0) == 0);
	assert(page_alloc(&pp1) == 0);
	assert(page_alloc(&pp2) == 0);

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npage*PGSIZE);
	assert(page2pa(pp1) < npage*PGSIZE);
	assert(page2pa(pp2) < npage*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	LIST_INIT(&page_free_list);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// free and re-allocate?
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert(page_alloc(&pp0) == 0);
	assert(page_alloc(&pp1) == 0);
	assert(page_alloc(&pp2) == 0);
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page_alloc(&pp) == -E_NO_MEM);

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page_alloc() succeeded!\n");
}
//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by i386_vm_init()).
//
// This function doesn't test every corner case,
// in fact it doesn't test the permission bits at all,
// but it is a pretty good sanity check.
//
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);

static void
check_boot_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = boot_pgdir;

	// check pages array
	n = ROUNDUP(npage*sizeof(struct Page), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check envs array (new test for lab 3)
	n = ROUNDUP(NENV*sizeof(struct Env), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UENVS + i) == PADDR(envs) + i);

	// check phys mem ('npage' counts pages, so scale it to bytes)
	for (i = 0; i < npage * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);

	// check for zero/non-zero in PDEs
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(VPT):
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
		case PDX(UENVS):
			assert(pgdir[i]);
			break;
		default:
			if (i >= PDX(KERNBASE))
				assert(pgdir[i]);
			else
				assert(pgdir[i] == 0);
			break;
		}
	}
	cprintf("check_boot_pgdir() succeeded!\n");
}
// This function returns the physical address of the page containing 'va',
// defined by the page directory 'pgdir'.  The hardware normally performs
// this functionality for us!  We define our own version to help check
// the check_boot_pgdir() function; it shouldn't be used elsewhere.
static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t*) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}
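
// Worked example (illustrative address): for va = 0x00401000,
// PDX(va) = 1 selects pgdir[1], whose PTE_ADDR names the page table;
// PTX(va) = 1 selects the entry within that table; and PTE_ADDR of
// that entry is the physical page address with the low 12 permission
// bits masked off.  check_va2pa simply replays what the MMU does.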
// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct Page' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

//
// Initialize page structure and memory free list.
// After this point, ONLY use the functions below
// to allocate and deallocate physical memory via the page_free_list,
// and NEVER use boot_alloc()
//
void
page_init(void)
{
	// The example code here marks all pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) Mark the rest of base memory as free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM).
	//     Mark it as in use so that it can never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free.  Where is the kernel?
	//     Which pages are used for page tables and other data structures?
	//
	// Change the code to reflect this.
	struct Page *ppage;
	unsigned int i;
	extern char _start[];

	LIST_INIT(&page_free_list);
	for (i = 0; i < npage; i++) {
		pages[i].pp_ref = 0;
		LIST_INSERT_HEAD(&page_free_list, &pages[i], pp_link);
	}

	// mark page 0 as in use
	LIST_REMOVE(&pages[0], pp_link);

	// IO hole
	for (i = IOPHYSMEM; i < EXTPHYSMEM; i += PGSIZE) {
		ppage = pa2page(i);
		LIST_REMOVE(ppage, pp_link);
	}

	// reserve the kernel image and the data structures allocated by
	// boot_alloc(), i.e. the virtual range [_start, boot_freemem)
	for (i = (unsigned int) _start; i < (unsigned int) boot_freemem; i += PGSIZE) {
		ppage = pa2page(PADDR(i));
		LIST_REMOVE(ppage, pp_link);
	}
}
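
// Note on the address macros used above (a sketch): PADDR turns a
// kernel virtual address above KERNBASE into a physical address, and
// pa2page turns a physical address into its struct Page, e.g.:
//
//	struct Page *pp = pa2page(PADDR(boot_pgdir));
//	assert(page2pa(pp) == boot_cr3);	// same page, two views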
//
// Initialize a Page structure.
// The result has null links and 0 refcount.
// Note that the corresponding physical page is NOT initialized!
//
static void
page_initpp(struct Page *pp)
{
	memset(pp, 0, sizeof(*pp));
}
//
// Allocates a physical page.
// Does NOT set the contents of the physical page to zero -
// the caller must do that if necessary.
//
// *pp_store -- is set to point to the Page struct of the newly allocated
// page
//
// RETURNS
//   0 -- on success
//   -E_NO_MEM -- otherwise
//
// Hint: use LIST_FIRST, LIST_REMOVE, and page_initpp
// Hint: pp_ref should not be incremented
int
page_alloc(struct Page **pp_store)
{
	// Fill this function in
	struct Page *page_avail;

	if (!LIST_EMPTY(&page_free_list)) {
		page_avail = LIST_FIRST(&page_free_list);
		LIST_REMOVE(page_avail, pp_link);
		page_initpp(page_avail);

		*pp_store = page_avail;
		return 0;
	}
	return -E_NO_MEM;
}
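
// Usage sketch: page_alloc hands back an uninitialized page with
// pp_ref == 0; a typical caller zeroes it and takes the reference:
//
//	struct Page *pp;
//	if (page_alloc(&pp) == 0) {
//		memset(page2kva(pp), 0, PGSIZE);
//		pp->pp_ref++;	// caller owns the reference
//	}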
//
// Return a page to the free list.
// (This function should only be called when pp->pp_ref reaches 0.)
//
void
page_free(struct Page *pp)
{
	// Fill this function in
	if (pp->pp_ref)
		panic("page_free: pp->pp_ref is not zero.\n");

	LIST_INSERT_HEAD(&page_free_list, pp, pp_link);
}
//
// Decrement the reference count on a page,
// freeing it if there are no more refs.
//
void
page_decref(struct Page* pp)
{
	if (--pp->pp_ref == 0)
		page_free(pp);
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// If the relevant page table doesn't exist in the page directory, then:
//    - If create == 0, pgdir_walk returns NULL.
//    - Otherwise, pgdir_walk tries to allocate a new page table
//	with page_alloc.  If this fails, pgdir_walk returns NULL.
//    - pgdir_walk sets pp_ref to 1 for the new page table.
//    - Finally, pgdir_walk returns a pointer into the new page table.
//
// Hint: you can turn a Page * into the physical address of the
// page it refers to with page2pa() from kern/pmap.h.
pte_t *
pgdir_walk(pde_t *pgdir, const void *va, int create)
{
	// Fill this function in
	unsigned int pde;
	unsigned int la = (unsigned int) va;
	struct Page *free_page;

	pde = pgdir[PDX(la)];
	if (pde & PTE_P)
		return (pte_t *) KADDR(PTE_ADDR(pde)) + PTX(la);
	else if (!create)
		return NULL;
	else {
		if (page_alloc(&free_page) == 0) {
			// clear the actual physical page
			memset(page2kva(free_page), 0, PGSIZE);
			// set up the page directory entry
			pgdir[PDX(la)] = page2pa(free_page) | PTE_W | PTE_U | PTE_P;
			// the directory now references the new page table
			free_page->pp_ref++;
			return (pte_t *) KADDR(PTE_ADDR(pgdir[PDX(la)])) + PTX(la);
		}
		return NULL;
	}
}
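
// Usage sketch (hypothetical address): look up, without creating, the
// PTE for a virtual address and test a permission bit:
//
//	pte_t *pte = pgdir_walk(boot_pgdir, (void *) 0x400000, 0);
//	if (pte && (*pte & PTE_P) && (*pte & PTE_W))
//		;	// 0x400000 is mapped writable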
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table
// entry should be set to 'perm|PTE_P'.
//
// Details
//   - If there is already a page mapped at 'va', it is page_remove()d.
//   - If necessary, on demand, allocates a page table and inserts it into
//     'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm)
{
	// Fill this function in
	pte_t *pte;

	pte = pgdir_walk(pgdir, va, 1);
	if (pte == NULL)
		return -E_NO_MEM;

	// truncate 'perm' to the permission bits
	perm &= 0xfff;

	if (*pte & PTE_P) {
		if (PTE_ADDR(*pte) != page2pa(pp)) {
			// a different page is mapped at 'va': remove it first
			page_remove(pgdir, va);
			*pte = page2pa(pp) | perm | PTE_P;
			// the new mapping holds a reference to pp
			pp->pp_ref++;
			tlb_invalidate(pgdir, va);
		} else {
			// the same page is re-inserted at 'va';
			// page_check() shows the permissions may change, so
			// rewrite the whole entry rather than OR-ing bits in
			// (OR alone could never drop a permission bit)
			*pte = page2pa(pp) | perm | PTE_P;
			tlb_invalidate(pgdir, va);
		}
	} else {
		*pte = page2pa(pp) | perm | PTE_P;
		// the new mapping holds a reference to pp
		pp->pp_ref++;
	}

	return 0;
}
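
// Usage sketch (hypothetical address): allocate a fresh page and map
// it user-writable; on success the mapping owns one reference:
//
//	struct Page *pp;
//	if (page_alloc(&pp) == 0 &&
//	    page_insert(boot_pgdir, pp, (void *) 0x800000, PTE_U | PTE_W) == 0)
//		memset(page2kva(pp), 0, PGSIZE);	// also visible at 0x800000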
//
// Map [la, la+size) of linear address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
// Use permission bits perm|PTE_P for the entries.
//
// This function is only intended to set up the ``static'' mappings
// above UTOP.  As such, it should *not* change the pp_ref field on the
// mapped pages.
//
// Hint: the TA solution uses pgdir_walk
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm)
{
	// Fill this function in
	pte_t *pt;
	size_t i;	// size_t, so the 256MB KERNBASE mapping can't overflow an int

	perm &= 0xfff;
	for (i = 0; i < size; i += PGSIZE) {
		pt = pgdir_walk(pgdir, (void *) (la + i), 1);
		*pt = PTE_ADDR(pa + i) | perm | PTE_P;
	}
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.  This is used by page_remove
// but should not be used by other callers.
//
// Return 0 if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	// Fill this function in
	pte_t *pte;
	pte_t entry;

	pte = pgdir_walk(pgdir, va, 0);
	if (pte == NULL)
		return NULL;

	entry = *pte;
	if (!(entry & PTE_P))
		return NULL;

	// paranoia: a PTE should never point past physical memory
	if (PPN(PTE_ADDR(entry)) >= npage)
		return NULL;

	if (pte_store)
		*pte_store = pte;

	return &pages[PPN(PTE_ADDR(entry))];
}
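
// Usage sketch ('pgdir', 'other_pgdir', and 'va' are hypothetical):
// page_lookup is the read side of page_insert, e.g. to test whether
// two address spaces share a physical page:
//
//	pte_t *pte;
//	struct Page *pp = page_lookup(pgdir, va, &pte);
//	if (pp && page_lookup(other_pgdir, va, NULL) == pp)
//		;	// shared mapping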
//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The pg table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the pg dir/pg table.
//
// Hint: The TA solution is implemented using page_lookup,
//	tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, void *va)
{
	// Fill this function in
	pte_t *entry;
	struct Page *pp;

	pp = page_lookup(pgdir, va, &entry);
	if (pp) {
		page_decref(pp);
		*entry = 0;
		tlb_invalidate(pgdir, va);
	}
}
//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, void *va)
{
	// Flush the entry only if we're modifying the current address space.
	// For now, there is only one address space, so always invalidate.
	invlpg(va);
}
static uintptr_t user_mem_check_addr;

//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, const void *va, size_t len, int perm)
{
	// LAB 3: Your code here.
	unsigned int va_start, va_end;
	pte_t *pte;

	perm |= PTE_P;
	// if the very first page fails, the first erroneous address is
	// 'va' itself; after that it's always the start of a page.
	user_mem_check_addr = (uintptr_t) va;
	// make 'va_start' and 'va_end' page-aligned.
	va_start = ROUNDDOWN((unsigned int) va, PGSIZE);
	va_end = ROUNDUP((unsigned int) va + len, PGSIZE);

	while (va_start < va_end) {
		// check that the address is below ULIM
		if (va_start >= ULIM)
			return -E_FAULT;

		// check the page table permissions
		pte = pgdir_walk(env->env_pgdir, (void *) va_start, 0);
		if (!pte)
			return -E_FAULT;
		if ((*pte & perm) != perm)
			return -E_FAULT;

		// advance, and record the start of the next page as the
		// first erroneous address should the next check fail
		va_start += PGSIZE;
		user_mem_check_addr = va_start;
	}
	return 0;
}
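
// Usage sketch: a system call would validate a user buffer 's' of
// 'len' bytes (both hypothetical) before dereferencing it:
//
//	if (user_mem_check(curenv, s, len, PTE_U) < 0)
//		return -E_FAULT;	// never touch 's' in that case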
//
// Checks that environment 'env' is allowed to access the range
// of memory [va, va+len) with permissions 'perm | PTE_U'.
// If it can, then the function simply returns.
// If it cannot, 'env' is destroyed.
//
void
user_mem_assert(struct Env *env, const void *va, size_t len, int perm)
{
	if (user_mem_check(env, va, len, perm | PTE_U) < 0) {
		cprintf("[%08x] user_mem_check assertion failure for "
			"va %08x\n", curenv->env_id, user_mem_check_addr);
		env_destroy(env);	// may not return
	}
}
// check page_insert, page_remove, &c
static void
page_check(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page_list fl;
	pte_t *ptep, *ptep1;
	void *va;
	int i;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert(page_alloc(&pp0) == 0);
	assert(page_alloc(&pp1) == 0);
	assert(page_alloc(&pp2) == 0);

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	LIST_INIT(&page_free_list);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// there is no page allocated at address 0
	assert(page_lookup(boot_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) == 0);
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(boot_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(page_alloc(&pp) == -E_NO_MEM);

	// check that pgdir_walk returns a pointer to the pte
	ptep = KADDR(PTE_ADDR(boot_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(boot_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(boot_pgdir, pp2, (void*) PGSIZE, PTE_U) == 0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(boot_pgdir, (void*) PGSIZE, 0) & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(boot_pgdir, pp0, (void*) PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(boot_pgdir, pp1, (void*) PGSIZE, 0) == 0);

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(boot_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);
	// pp2 should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(boot_pgdir, 0x0);
	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(boot_pgdir, (void*) PGSIZE);
	assert(check_va2pa(boot_pgdir, 0x0) == ~0);
	assert(check_va2pa(boot_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

#if 0
	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(boot_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(boot_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(boot_pgdir, 0x0);
	assert(pp2->pp_ref == 0);
#endif

	// forcibly take pp0 back
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	boot_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(boot_pgdir, va, 1);
	ptep1 = KADDR(PTE_ADDR(boot_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	boot_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(boot_pgdir, 0x0, 1);
	ptep = page2kva(pp0);
	for (i = 0; i < NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	boot_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("page_check() succeeded!\n");
}