#include "param.h"
#include "types.h"
#include "defs.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "elf.h"

extern char data[];  // defined in data.S

static pde_t *kpgdir;  // for use in scheduler()

// Allocate one page table for the machine for the kernel address
// space for scheduler processes.
void
kvmalloc(void)
{
  kpgdir = setupkvm();
}

// Set up CPU's kernel segment descriptors.
// Run once at boot time on each CPU.
void
seginit(void)
{
  struct cpu *c;

  // Map virtual addresses to linear addresses using identity map.
  // Cannot share a CODE descriptor for both kernel and user
  // because it would have to have DPL_USR, but the CPU forbids
  // an interrupt from CPL=0 to DPL=3.
  c = &cpus[cpunum()];
  c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, 0);
  c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, 0, 0xffffffff, DPL_USER);
  c->gdt[SEG_UDATA] = SEG(STA_W, 0, 0xffffffff, DPL_USER);

  // Map cpu and curproc: per-CPU variables reached through %gs.
  c->gdt[SEG_KCPU] = SEG(STA_W, &c->cpu, 8, 0);

  lgdt(c->gdt, sizeof(c->gdt));
  loadgs(SEG_KCPU << 3);

  // Initialize cpu-local storage.
  cpu = c;
  proc = 0;
}

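// Illustrative sketch (not in the original source): an x86 segment
// selector is a GDT index shifted left 3 bits; bit 2 selects GDT vs.
// LDT and bits 0-1 hold the requested privilege level. That is why
// loadgs() above takes SEG_KCPU << 3: index SEG_KCPU, in the GDT, RPL 0.
static inline ushort
example_selector(uint index, uint rpl)  // hypothetical helper
{
  return (index << 3) | rpl;  // TI bit (bit 2) left 0: use the GDT
}
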
// Return the address of the PTE in page table pgdir
// that corresponds to linear address va. If create!=0,
// create any required page table pages.
static pte_t *
walkpgdir(pde_t *pgdir, const void *va, int create)
{
  pde_t *pde;
  pte_t *pgtab;

  pde = &pgdir[PDX(va)];
  if(*pde & PTE_P){
    pgtab = (pte_t*)PTE_ADDR(*pde);
  } else {
    if(!create || (pgtab = (pte_t*)kalloc()) == 0)
      return 0;
    // Make sure all those PTE_P bits are zero.
    memset(pgtab, 0, PGSIZE);
    // The permissions here are overly generous, but they can
    // be further restricted by the permissions in the page table
    // entries, if necessary.
    *pde = PADDR(pgtab) | PTE_P | PTE_W | PTE_U;
  }
  return &pgtab[PTX(va)];
}

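// Illustrative sketch (not part of xv6): how walkpgdir decodes a 32-bit
// virtual address under x86 two-level paging. The top 10 bits (PDX) pick
// one of 1024 page directory entries, the next 10 (PTX) one of 1024
// page table entries, and the low 12 bits are the offset in a 4KB page.
static inline void
example_decode(uint va)  // hypothetical, for illustration only
{
  cprintf("va 0x%x: pdx %d ptx %d offset 0x%x\n",
          va, PDX(va), PTX(va), va & 0xFFF);
}
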
// Create PTEs for linear addresses starting at la that refer to
// physical addresses starting at pa. la and size might not
// be page-aligned.
static int
mappages(pde_t *pgdir, void *la, uint size, uint pa, int perm)
{
  char *a, *last;
  pte_t *pte;

  a = PGROUNDDOWN(la);
  last = PGROUNDDOWN(la + size - 1);
  for(;;){
    pte = walkpgdir(pgdir, a, 1);
    if(pte == 0)
      return -1;
    if(*pte & PTE_P)
      panic("remap");
    *pte = pa | perm | PTE_P;
    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}

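// Illustrative sketch (not part of xv6): allocating one physical page
// and installing it at a hypothetical page-aligned user address uva,
// the same pattern inituvm() and allocuvm() use below.
static inline int
example_map_page(pde_t *pgdir, uint uva)  // hypothetical helper
{
  char *mem;

  if((mem = kalloc()) == 0)
    return -1;
  memset(mem, 0, PGSIZE);
  return mappages(pgdir, (char*)uva, PGSIZE, PADDR(mem), PTE_W|PTE_U);
}
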
// The mappings from logical to linear are one to one (i.e.,
// segmentation doesn't do anything).
// There is one page table per process, plus one that's used
// when a CPU is not running any process (kpgdir).
// A user process uses the same page table as the kernel; the
// page protection bits prevent it from using anything other
// than its memory.
//
// setupkvm() and exec() set up every page table like this:
//   0..640K      : user memory (text, data, stack, heap)
//   640K..1M     : mapped direct (for IO space)
//   1M..end      : mapped direct (for the kernel's text and data)
//   end..PHYSTOP : mapped direct (kernel heap and user pages)
//   0xfe000000..0: mapped direct (devices such as ioapic)
//
// The kernel allocates memory for its heap and for user memory
// between kernend and the end of physical memory (PHYSTOP).
// The virtual address space of each user program includes the kernel
// (which is inaccessible in user mode). The user program addresses
// range from 0 to 640KB (USERTOP), which is where the I/O hole starts
// (both in physical memory and in the kernel's virtual address
// space).
static struct kmap {
  void *p;
  void *e;
  int perm;
} kmap[] = {
  {(void*)USERTOP,    (void*)0x100000, PTE_W},  // I/O space
  {(void*)0x100000,   data,            0    },  // kernel text, rodata
  {data,              (void*)PHYSTOP,  PTE_W},  // kernel data, memory
  {(void*)0xFE000000, 0,               PTE_W},  // device mappings
};

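// Note: in the last kmap entry e is 0, so the size e - p computed in
// setupkvm() wraps modulo 2^32 to 0x02000000: the 32MB of device space
// from 0xFE000000 up to the top of the 32-bit address space.
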
// Set up kernel part of a page table.
pde_t*
setupkvm(void)
{
  pde_t *pgdir;
  struct kmap *k;

  if((pgdir = (pde_t*)kalloc()) == 0)
    return 0;
  memset(pgdir, 0, PGSIZE);
  for(k = kmap; k < &kmap[NELEM(kmap)]; k++)
    if(mappages(pgdir, k->p, k->e - k->p, (uint)k->p, k->perm) < 0)
      return 0;
  return pgdir;
}

// Turn on paging.
void
vmenable(void)
{
  uint cr0;

  switchkvm();  // load kpgdir into cr3
  cr0 = rcr0();
  cr0 |= CR0_PG;
  lcr0(cr0);
}

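// Note: enabling CR0_PG in mid-flight is safe only because kpgdir maps
// the kernel one-to-one, so the next instruction fetch resolves to the
// same physical address with paging on as it did with paging off.
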
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  lcr3(PADDR(kpgdir));  // switch to the kernel page table
}

// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
  cpu->gdt[SEG_TSS].s = 0;
  cpu->ts.ss0 = SEG_KDATA << 3;
  cpu->ts.esp0 = (uint)p->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
  lcr3(PADDR(p->pgdir));  // switch to new address space
  popcli();
}

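// Note: the pushcli()/popcli() pair in switchuvm() keeps interrupts off
// so no interrupt can arrive while the TSS and page table are only half
// switched.
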
// Load the initcode into address 0 of pgdir.
// sz must be less than a page.
void
inituvm(pde_t *pgdir, char *init, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("inituvm: more than a page");
  mem = kalloc();
  memset(mem, 0, PGSIZE);
  mappages(pgdir, 0, PGSIZE, PADDR(mem), PTE_W|PTE_U);
  memmove(mem, init, sz);
}

// Load a program segment into pgdir. addr must be page-aligned
// and the pages from addr to addr+sz must already be mapped.
int
loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz)
{
  uint i, pa, n;
  pte_t *pte;

  if((uint)addr % PGSIZE != 0)
    panic("loaduvm: addr must be page aligned");
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, addr+i, 0)) == 0)
      panic("loaduvm: address should exist");
    pa = PTE_ADDR(*pte);
    if(sz - i < PGSIZE)
      n = sz - i;
    else
      n = PGSIZE;
    if(readi(ip, (char*)pa, offset+i, n) != n)
      return -1;
  }
  return 0;
}

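// Note: loaduvm() hands the physical address pa straight to readi();
// this works only because the kernel page table identity-maps all of
// physical memory below PHYSTOP (see kmap above).
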
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz > USERTOP)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    mappages(pgdir, (char*)a, PGSIZE, PADDR(mem), PTE_W|PTE_U);
  }
  return newsz;
}

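// Illustrative sketch (not part of this file): growing a process image
// by n bytes in the style of growproc() in proc.c; example_grow is a
// hypothetical name.
static inline int
example_grow(struct proc *p, uint n)
{
  uint sz;

  if((sz = allocuvm(p->pgdir, p->sz, p->sz + n)) == 0)
    return -1;
  p->sz = sz;
  return 0;
}
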
// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
int
deallocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  pte_t *pte;
  uint a, pa;

  if(newsz >= oldsz)
    return oldsz;

  a = PGROUNDUP(newsz);
  for(; a < oldsz; a += PGSIZE){
    pte = walkpgdir(pgdir, (char*)a, 0);
    if(pte && (*pte & PTE_P) != 0){
      pa = PTE_ADDR(*pte);
      if(pa == 0)
        panic("kfree");
      kfree((char*)pa);
      *pte = 0;
    }
  }
  return newsz;
}

// Free a page table and all the physical memory pages
// in the user part.
void
freevm(pde_t *pgdir)
{
  uint i;

  if(pgdir == 0)
    panic("freevm: no pgdir");
  deallocuvm(pgdir, USERTOP, 0);
  for(i = 0; i < NPDENTRIES; i++){
    if(pgdir[i] & PTE_P)
      kfree((char*)PTE_ADDR(pgdir[i]));
  }
  kfree((char*)pgdir);
}

// Given a parent process's page table, create a copy
// of it for a child.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i;
  char *mem;

  if((d = setupkvm()) == 0)
    return 0;
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    if((mem = kalloc()) == 0)
      goto bad;
    memmove(mem, (char*)pa, PGSIZE);
    if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0)
      goto bad;
  }
  return d;

bad:
  freevm(d);
  return 0;
}

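// Note: copyuvm() copies every user page eagerly; xv6 has no
// copy-on-write, so fork() pays for a full copy of the parent's memory
// even if the child immediately calls exec().
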
// Map user virtual address to kernel physical address.
char*
uva2ka(pde_t *pgdir, char *uva)
{
  pte_t *pte;

  pte = walkpgdir(pgdir, uva, 0);
  if(pte == 0)
    return 0;
  if((*pte & PTE_P) == 0)
    return 0;
  if((*pte & PTE_U) == 0)
    return 0;
  return (char*)PTE_ADDR(*pte);
}

// Copy len bytes from p to user address va in page table pgdir.
// Most useful when pgdir is not the current page table.
// uva2ka ensures this only works for PTE_U pages.
int
copyout(pde_t *pgdir, uint va, void *p, uint len)
{
  char *buf, *pa0;
  uint n, va0;

  buf = (char*)p;
  while(len > 0){
    va0 = (uint)PGROUNDDOWN(va);
    pa0 = uva2ka(pgdir, (char*)va0);
    if(pa0 == 0)
      return -1;
    n = PGSIZE - (va - va0);
    if(n > len)
      n = len;
    memmove(pa0 + (va - va0), buf, n);
    len -= n;
    buf += n;
    va = va0 + PGSIZE;
  }
  return 0;
}

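// Illustrative sketch (not part of this file): exec()-style use of
// copyout() to push a NUL-terminated string onto a new image's user
// stack at sp while the old page table is still active; the name and
// signature here are hypothetical.
static inline int
example_push_str(pde_t *pgdir, uint sp, char *s)
{
  return copyout(pgdir, sp, s, strlen(s) + 1);
}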