// data[] marks the start of the kernel data segment (defined in data.S);
// used by kmap[] below to split kernel text/rodata from data/memory.
9 extern char data
[]; // defined in data.S
// Kernel-only page table, installed while no process runs (scheduler context).
11 static pde_t
*kpgdir
; // for use in scheduler()
// Allocate one page table for the machine for the kernel address
// space for scheduler processes.
21 // Set up CPU's kernel segment descriptors.
22 // Run once at boot time on each CPU.
28 // Map virtual addresses to linear addresses using identity map.
29 // Cannot share a CODE descriptor for both kernel and user
30 // because it would have to have DPL_USR, but the CPU forbids
31 // an interrupt from CPL=0 to DPL=3.
33 c
->gdt
[SEG_KCODE
] = SEG(STA_X
|STA_R
, 0, 0xffffffff, 0);
34 c
->gdt
[SEG_KDATA
] = SEG(STA_W
, 0, 0xffffffff, 0);
35 c
->gdt
[SEG_UCODE
] = SEG(STA_X
|STA_R
, 0, 0xffffffff, DPL_USER
);
36 c
->gdt
[SEG_UDATA
] = SEG(STA_W
, 0, 0xffffffff, DPL_USER
);
38 // Map cpu, and curproc
39 c
->gdt
[SEG_KCPU
] = SEG(STA_W
, &c
->cpu
, 8, 0);
41 lgdt(c
->gdt
, sizeof(c
->gdt
));
42 loadgs(SEG_KCPU
<< 3);
44 // Initialize cpu-local storage.
49 // Return the address of the PTE in page table pgdir
50 // that corresponds to linear address va. If create!=0,
51 // create any required page table pages.
53 walkpgdir(pde_t
*pgdir
, const void *va
, int create
)
58 pde
= &pgdir
[PDX(va
)];
60 pgtab
= (pte_t
*)PTE_ADDR(*pde
);
62 if(!create
|| (pgtab
= (pte_t
*)kalloc()) == 0)
64 // Make sure all those PTE_P bits are zero.
65 memset(pgtab
, 0, PGSIZE
);
66 // The permissions here are overly generous, but they can
67 // be further restricted by the permissions in the page table
68 // entries, if necessary.
69 *pde
= PADDR(pgtab
) | PTE_P
| PTE_W
| PTE_U
;
71 return &pgtab
[PTX(va
)];
74 // Create PTEs for linear addresses starting at la that refer to
75 // physical addresses starting at pa. la and size might not
78 mappages(pde_t
*pgdir
, void *la
, uint size
, uint pa
, int perm
)
84 last
= PGROUNDDOWN(la
+ size
- 1);
86 pte
= walkpgdir(pgdir
, a
, 1);
91 *pte
= pa
| perm
| PTE_P
;
// The mappings from logical to linear are one to one (i.e.,
// segmentation doesn't do anything).
// There is one page table per process, plus one that's used
// when a CPU is not running any process (kpgdir).
// A user process uses the same page table as the kernel; the
// page protection bits prevent it from using anything other
// than its own memory.
//
// setupkvm() and exec() set up every page table like this:
//   0..640K          : user memory (text, data, stack, heap)
//   640K..1M         : mapped direct (for IO space)
//   1M..end          : mapped direct (for the kernel's text and data)
//   end..PHYSTOP     : mapped direct (kernel heap and user pages)
//   0xfe000000..0    : mapped direct (devices such as ioapic)
//
// The kernel allocates memory for its heap and for user memory
// between kernend and the end of physical memory (PHYSTOP).
// The virtual address space of each user program includes the kernel
// (which is inaccessible in user mode).  The user program addresses
// range from 0 till 640KB (USERTOP), which is where the I/O hole starts
// (both in physical memory and in the kernel's virtual address
// space).
127 {(void*)USERTOP
, (void*)0x100000, PTE_W
}, // I/O space
128 {(void*)0x100000, data
, 0 }, // kernel text, rodata
129 {data
, (void*)PHYSTOP
, PTE_W
}, // kernel data, memory
130 {(void*)0xFE000000, 0, PTE_W
}, // device mappings
133 // Set up kernel part of a page table.
140 if((pgdir
= (pde_t
*)kalloc()) == 0)
142 memset(pgdir
, 0, PGSIZE
);
144 for(k
= kmap
; k
< &kmap
[NELEM(kmap
)]; k
++)
145 if(mappages(pgdir
, k
->p
, k
->e
- k
->p
, (uint
)k
->p
, k
->perm
) < 0)
// NOTE(review): single surviving line of the paging-enable routine;
// the rest of the enclosing function is not visible in this chunk.
157 switchkvm(); // load kpgdir into cr3
163 // Switch h/w page table register to the kernel-only page table,
164 // for when no process is running.
168 lcr3(PADDR(kpgdir
)); // switch to the kernel page table
171 // Switch TSS and h/w page table to correspond to process p.
173 switchuvm(struct proc
*p
)
176 cpu
->gdt
[SEG_TSS
] = SEG16(STS_T32A
, &cpu
->ts
, sizeof(cpu
->ts
)-1, 0);
177 cpu
->gdt
[SEG_TSS
].s
= 0;
178 cpu
->ts
.ss0
= SEG_KDATA
<< 3;
179 cpu
->ts
.esp0
= (uint
)proc
->kstack
+ KSTACKSIZE
;
182 panic("switchuvm: no pgdir");
183 lcr3(PADDR(p
->pgdir
)); // switch to new address space
187 // Load the initcode into address 0 of pgdir.
188 // sz must be less than a page.
190 inituvm(pde_t
*pgdir
, char *init
, uint sz
)
195 panic("inituvm: more than a page");
197 memset(mem
, 0, PGSIZE
);
198 mappages(pgdir
, 0, PGSIZE
, PADDR(mem
), PTE_W
|PTE_U
);
199 memmove(mem
, init
, sz
);
202 // Load a program segment into pgdir. addr must be page-aligned
203 // and the pages from addr to addr+sz must already be mapped.
205 loaduvm(pde_t
*pgdir
, char *addr
, struct inode
*ip
, uint offset
, uint sz
)
210 if((uint
)addr
% PGSIZE
!= 0)
211 panic("loaduvm: addr must be page aligned");
212 for(i
= 0; i
< sz
; i
+= PGSIZE
){
213 if((pte
= walkpgdir(pgdir
, addr
+i
, 0)) == 0)
214 panic("loaduvm: address should exist");
220 if(readi(ip
, (char*)pa
, offset
+i
, n
) != n
)
226 // Allocate page tables and physical memory to grow process from oldsz to
227 // newsz, which need not be page aligned. Returns new size or 0 on error.
229 allocuvm(pde_t
*pgdir
, uint oldsz
, uint newsz
)
239 a
= PGROUNDUP(oldsz
);
240 for(; a
< newsz
; a
+= PGSIZE
){
243 cprintf("allocuvm out of memory\n");
244 deallocuvm(pgdir
, newsz
, oldsz
);
247 memset(mem
, 0, PGSIZE
);
248 mappages(pgdir
, (char*)a
, PGSIZE
, PADDR(mem
), PTE_W
|PTE_U
);
253 // Deallocate user pages to bring the process size from oldsz to
254 // newsz. oldsz and newsz need not be page-aligned, nor does newsz
255 // need to be less than oldsz. oldsz can be larger than the actual
256 // process size. Returns the new process size.
258 deallocuvm(pde_t
*pgdir
, uint oldsz
, uint newsz
)
266 a
= PGROUNDUP(newsz
);
267 for(; a
< oldsz
; a
+= PGSIZE
){
268 pte
= walkpgdir(pgdir
, (char*)a
, 0);
269 if(pte
&& (*pte
& PTE_P
) != 0){
280 // Free a page table and all the physical memory pages
288 panic("freevm: no pgdir");
289 deallocuvm(pgdir
, USERTOP
, 0);
290 for(i
= 0; i
< NPDENTRIES
; i
++){
292 kfree((char*)PTE_ADDR(pgdir
[i
]));
297 // Given a parent process's page table, create a copy
298 // of it for a child.
300 copyuvm(pde_t
*pgdir
, uint sz
)
307 if((d
= setupkvm()) == 0)
309 for(i
= 0; i
< sz
; i
+= PGSIZE
){
310 if((pte
= walkpgdir(pgdir
, (void*)i
, 0)) == 0)
311 panic("copyuvm: pte should exist");
313 panic("copyuvm: page not present");
315 if((mem
= kalloc()) == 0)
317 memmove(mem
, (char*)pa
, PGSIZE
);
318 if(mappages(d
, (void*)i
, PGSIZE
, PADDR(mem
), PTE_W
|PTE_U
) < 0)
328 // Map user virtual address to kernel physical address.
330 uva2ka(pde_t
*pgdir
, char *uva
)
334 pte
= walkpgdir(pgdir
, uva
, 0);
335 if((*pte
& PTE_P
) == 0)
337 if((*pte
& PTE_U
) == 0)
339 return (char*)PTE_ADDR(*pte
);
342 // Copy len bytes from p to user address va in page table pgdir.
343 // Most useful when pgdir is not the current page table.
344 // uva2ka ensures this only works for PTE_U pages.
346 copyout(pde_t
*pgdir
, uint va
, void *p
, uint len
)
353 va0
= (uint
)PGROUNDDOWN(va
);
354 pa0
= uva2ka(pgdir
, (char*)va0
);
357 n
= PGSIZE
- (va
- va0
);
360 memmove(pa0
+ (va
- va0
), buf
, n
);