2 * Copyright (c) 2008 Joshua Phillips. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
21 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
23 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
25 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "mm/region.h"
29 #include "mm/paging.h"
30 #include "mm/allocation.h"
33 #include "interrupt.h"
// Pointer to the address space currently loaded into the MMU.
// Starts out NULL; the first addr_space_new() installs itself here
// (see the XXX note there), and addr_space_switch() updates it
// whenever the active address space changes.
static struct addr_space
*current_addr_space
= NULL
;
// Allocate 'n_pages' contiguous physical pages and wrap them in a
// freshly malloc'd p_region, recording the start as a page frame
// number. Presumably returns the new p_region, or NULL on failure.
// NOTE(review): this excerpt is missing several original lines -- the
// opening brace, the p_r/n_pages_alloc declarations, the malloc
// NULL-check body, the error-path returns, the len/ref_count
// initialisation and the final return. Recover them from version
// control before changing any logic here.
39 static struct p_region
*p_r_create(size_t n_pages
)
42 uintptr_t paddr_start
;
45 p_r
= malloc(sizeof *p_r
);
// reached when malloc returned NULL (the check is on a missing line)
47 TRACE("p_r_create: memory full");
// ask the physical allocator for the pages, receiving the starting
// physical byte address in paddr_start
49 n_pages_alloc
= alloc_pages(n_pages
, n_pages
, &paddr_start
);
// partial allocation counts as failure
50 if (n_pages_alloc
< n_pages
){
51 TRACE("p_r_create: alloc_pages failed");
// store the start as a page frame number, not a byte address
57 p_r
->start
= paddr_start
/ PAGE_SIZE
;
// Wrap an already-chosen physical range ('physical', 'n_pages') in a
// new p_region, without taking pages from the frame allocator.
// NOTE(review): missing from this excerpt: the opening brace, the
// p_r declaration, the malloc NULL-check body, the len/ref_count
// initialisation and the final return -- consult the full source
// before editing.
64 static struct p_region
*p_r_create_fixed(size_t n_pages
, uintptr_t physical
)
68 p_r
= malloc(sizeof *p_r
);
// reached when malloc returned NULL (the check is on a missing line)
70 TRACE("p_r_create_fixed: memory full");
// store the caller-supplied address as a page frame number
74 p_r
->start
= physical
/ PAGE_SIZE
;
// Take an extra reference on a p_region (shared between vm_regions,
// balanced by p_region_release()).
// NOTE(review): the body is missing from this excerpt; presumably it
// increments p_r->ref_count -- confirm against the full file.
80 static void p_region_addref(struct p_region
*p_r
)
// Drop one reference on a p_region; once the count reaches zero the
// underlying physical pages (and presumably the struct itself) are
// freed.
// NOTE(review): the decrement of p_r->ref_count and the free(p_r)
// call appear to be on lines missing from this excerpt.
86 static void p_region_release(struct p_region
*p_r
)
89 if (p_r
->ref_count
== 0){
90 // no longer used - free it
// 'start' is a page frame number, so convert back to a byte address
91 free_pages(p_r
->start
* PAGE_SIZE
, p_r
->len
);
96 // create a vm_region and insert it into the address space
97 // (does not deal with corresponding p_region)
98 // TODO: check for overlapping regions? (which wouldn't make sense)
// NOTE(review): missing from this excerpt: the opening brace, the
// malloc NULL-check body, the len/flags/prev initialisation and the
// final return of vm_r -- recover before changing logic.
99 static struct vm_region
*add_vm_region(struct addr_space
*aspace
, uintptr_t virtual, size_t n_pages
)
101 struct vm_region
*vm_r
;
102 vm_r
= malloc(sizeof *vm_r
);
// reached when malloc returned NULL (the check is on a missing line)
104 TRACE("add_vm_region: memory full");
// record the start as a virtual page number
107 vm_r
->start
= virtual / PAGE_SIZE
;
110 // linked-list prepend
112 vm_r
->next
= aspace
->regions
;
// fix the back pointer of the old head, if the list was non-empty
113 if (aspace
->regions
){
114 aspace
->regions
->prev
= vm_r
;
// the new region becomes the list head
116 aspace
->regions
= vm_r
;
// Allocate a new, empty address space with its own page directory.
// The very first address space created also becomes the current one
// (see the XXX note below). Presumably returns the new address space
// or NULL on failure.
// NOTE(review): the opening brace, the malloc NULL-check body, the
// closing braces and the final return of 'aspace' are on lines
// missing from this excerpt.
120 struct addr_space
*addr_space_new(void)
122 struct addr_space
*aspace
;
123 aspace
= malloc(sizeof *aspace
);
// reached when malloc returned NULL (the check is on a missing line)
125 TRACE("new_addr_space: memory full");
// start with no mapped regions
127 aspace
->regions
= NULL
;
// give the address space its own hardware page directory
128 pagedir_create(&aspace
->pagedir
);
130 // First address space becomes the current one.
131 // XXX: This doesn't seem ... right
132 if (!current_addr_space
){
133 current_addr_space
= aspace
;
// Tear down an address space: release each region's physical backing,
// destroy the page directory and (per the trailing comment) free the
// addr_space struct.
// NOTE(review): the loop header that walks vm_r via vm_r_next, the
// free() of each vm_region, and the free() of aspace itself are on
// lines missing from this excerpt.
139 void addr_space_delete(struct addr_space
*aspace
)
141 // delete and remove each region
142 struct vm_region
*vm_r
, *vm_r_next
;
143 vm_r
= aspace
->regions
;
// remember the successor before this node is released/freed
145 vm_r_next
= vm_r
->next
;
// drop this region's reference on its physical backing
147 p_region_release(vm_r
->p_region
);
152 pagedir_destroy(&aspace
->pagedir
);
153 // free the addr_space struct
// Create a vm_region at 'virtual' backed by freshly allocated
// physical pages. Presumably returns the region, or NULL if either
// the virtual insertion or the physical allocation fails.
// NOTE(review): the NULL checks after add_vm_region and p_r_create,
// the error-path cleanup (see the TODO) and the final return are on
// lines missing from this excerpt.
157 struct vm_region
*vm_region_new_physical(struct addr_space
*aspace
,
158 uintptr_t virtual, size_t n_pages
)
160 struct vm_region
*vm_r
;
161 struct p_region
*p_r
;
// insert the virtual range first...
163 vm_r
= add_vm_region(aspace
, virtual, n_pages
);
// ...then allocate physical pages to back it
167 p_r
= p_r_create(n_pages
);
169 // TODO: remove virtual region!!
// link the virtual region to its physical backing
172 vm_r
->p_region
= p_r
;
// Create a vm_region at 'virtual' backed by the caller-specified
// physical range starting at 'physical' (no page allocation).
// NOTE(review): the NULL checks after add_vm_region and
// p_r_create_fixed, the error-path cleanup (see the TODO) and the
// final return are on lines missing from this excerpt.
176 struct vm_region
*vm_region_new_physical_fixed(struct addr_space
*aspace
,
177 uintptr_t virtual, size_t n_pages
, uintptr_t physical
)
179 struct vm_region
*vm_r
;
180 struct p_region
*p_r
;
// insert the virtual range first...
182 vm_r
= add_vm_region(aspace
, virtual, n_pages
);
// ...then wrap the fixed physical range in a p_region
186 p_r
= p_r_create_fixed(n_pages
, physical
);
188 // TODO: remove virtual region!!
// link the virtual region to its physical backing
191 vm_r
->p_region
= p_r
;
// Like vm_region_new_physical(), but additionally marks the backing
// p_region as shared (P_R_SHARED), so duplicates reference the same
// physical pages rather than being copy-on-write candidates.
// NOTE(review): the NULL check on vm_r and the final return are on
// lines missing from this excerpt.
195 struct vm_region
*vm_region_new_physical_shared(struct addr_space
*aspace
,
196 uintptr_t virtual, size_t n_pages
)
198 struct vm_region
*vm_r
;
199 vm_r
= vm_region_new_physical(aspace
, virtual, n_pages
);
// flag the physical backing as shared
201 vm_r
->p_region
->flags
|= P_R_SHARED
;
206 // set and clear region flags
207 // TODO: update page tables!
209 void vm_region_set_flags(struct vm_region
*vm_r
, int flags
)
211 vm_r
->flags
|= flags
;
214 void vm_region_clear_flags(struct vm_region
*vm_r
, int flags
)
216 vm_r
->flags
&= ~flags
;
// Unlink a vm_region from its address space, drop its reference on
// the backing p_region and (presumably) free the vm_region struct.
// NOTE(review): the if/else that chooses between the prev-pointer and
// list-head updates, the guard before touching vm_r->next->prev, and
// the free(vm_r) call are on lines missing from this excerpt.
219 void vm_region_remove(struct addr_space
*aspace
, struct vm_region
*vm_r
)
221 // linked-list remove
// middle-of-list case: bypass vm_r in the forward direction
223 vm_r
->prev
->next
= vm_r
->next
;
// head-of-list case: the address space's list head skips vm_r
225 aspace
->regions
= vm_r
->next
;
// fix the back pointer of the successor
228 vm_r
->next
->prev
= vm_r
->prev
;
231 // release the associated physical memory
233 p_region_release(vm_r
->p_region
);
// Duplicate 'vm_r' into 'new_addr_space' at address 'virtual',
// sharing the same reference-counted p_region rather than copying the
// physical pages (used e.g. for fork-style duplication).
// NOTE(review): the NULL check on new_vm_r and the final return are
// on lines missing from this excerpt.
239 struct vm_region
*vm_region_dup(struct addr_space
*new_addr_space
, struct vm_region
*vm_r
, uintptr_t virtual)
241 struct vm_region
*new_vm_r
;
243 new_vm_r
= add_vm_region(new_addr_space
, virtual, vm_r
->len
);
// share the physical backing and copy the region flags
247 new_vm_r
->p_region
= vm_r
->p_region
;
248 new_vm_r
->flags
= vm_r
->flags
;
// account for the extra reference just created
249 p_region_addref(new_vm_r
->p_region
);
253 // find vm_region which contains address 'virtual'
// Presumably returns the containing region, or NULL when no region
// covers the page holding 'virtual'.
// NOTE(review): the loop that walks vm_r through ->next, the matching
// return and the not-found return are on lines missing from this
// excerpt.
254 static struct vm_region
*vm_region_find(struct addr_space
*aspace
, uintptr_t virtual)
256 struct vm_region
*vm_r
;
// start at the list head
258 vm_r
= aspace
->regions
;
// compare in page units: region covers pages [start, start + len)
260 if (vm_r
->start
<= (virtual / PAGE_SIZE
)
261 && (vm_r
->start
+ vm_r
->len
) > (virtual / PAGE_SIZE
)){
271 void addr_space_switch(struct addr_space
*new_addr_space
)
273 current_addr_space
= new_addr_space
;
274 pagedir_switch(&new_addr_space
->pagedir
);
// x86 page-fault (#PF) handler. Looks up the faulting address in the
// current address space, panics on genuine errors, and demand-maps
// the region's physical pages on a not-present fault.
// Error-code bits used below: 0x1 = page-level protection violation,
// 0x8 = reserved-bit violation (0x4, user/kernel, is commented out).
// NOTE(review): missing from this excerpt: the opening brace, the
// NULL check on vm_r, the panic() argument lists, the map_flags
// declaration, several else-arms/braces and the trailing map_mem
// arguments -- recover before changing any logic.
277 void pagefault_handler(int vector
, struct interrupt_stack
*is
)
279 struct vm_region
*vm_r
;
280 struct p_region
*p_r
;
281 uintptr_t fault_addr
, error_code
;
// CR2 holds the faulting linear address
283 fault_addr
= get_cr2();
284 error_code
= is
->error_code
;
// a fault before any address space exists would be a kernel bug
286 assert(current_addr_space
!= NULL
);
287 vm_r
= vm_region_find(current_addr_space
, fault_addr
);
// no region covers the faulting address
289 panic("Page fault at address 0x%.8X - nothing there\n",
292 p_r
= vm_r
->p_region
;
293 assert(p_r
->len
== vm_r
->len
); // sanity check
294 if (error_code
& 0x1){
295 // page fault caused by page-level protection violation
296 // i.e. the page was there, but permissions were wrong.
297 if (!(vm_r
->flags
& VM_R_WRITABLE
)){
298 panic("Page fault at address 0x%.8X - tried to write to read-only region",
// write to a writable-but-write-protected page: copy-on-write path
301 panic("COW NOT IMPLEMENTED");
303 //} else if (!(error_code & 0x4)){
304 // page fault caused by kernel
305 } else if (error_code
& 0x8){
306 panic("Page fault caused by reserved bit violation");
308 // page not present - time to map it
310 map_flags
= PTE_PRESENT
| PTE_USER
;
311 // set the writable flag in the page table
312 if ((vm_r
->flags
& VM_R_WRITABLE
) &&
313 !(p_r
->ref_count
> 1 && !(p_r
->flags
& P_R_SHARED
))){ // if the region isn't shared,
314 // and it's referenced by >1 vm_region,
315 // don't make it writable, so that we can
316 // handle the page fault.
317 map_flags
|= PTE_WRITABLE
;
// install the mapping; start fields are page numbers, so scale to
// byte addresses for map_mem
319 map_mem(p_r
->start
* PAGE_SIZE
, // physical
320 vm_r
->start
* PAGE_SIZE
, // virtual
321 vm_r
->len
, // n_pages
327 void set_pagefault_handler(void)
329 set_interrupt(0xE, pagefault_handler
);