Moved PAGE_SIZE to kernel/archinf.h
[marionette.git] / kernel / mm / region.c
blob6d4c49c2037cbcfd0a0cfc2c0cfab4a66a8b4747
1 /*
2 * Copyright (c) 2008 Joshua Phillips. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
15 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
21 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
23 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
25 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
28 #include "mm/region.h"
29 #include "mm/paging.h"
30 #include "mm/allocation.h"
31 #include "stddef.h"
32 #include "stdlib.h"
33 #include "interrupt.h"
34 #include "trace.h"
35 #include "assert.h"
// The address space whose page directory is currently active.
// Set by addr_space_switch() (and by the first addr_space_new());
// consulted by pagefault_handler() to resolve faults.
static struct addr_space *current_addr_space = NULL;
39 static struct p_region *p_r_create(size_t n_pages)
41 struct p_region *p_r;
42 uintptr_t paddr_start;
43 size_t n_pages_alloc;
45 p_r = malloc(sizeof *p_r);
46 if (!p_r){
47 TRACE("p_r_create: memory full");
48 } else {
49 n_pages_alloc = alloc_pages(n_pages, n_pages, &paddr_start);
50 if (n_pages_alloc < n_pages){
51 TRACE("p_r_create: alloc_pages failed");
52 free(p_r);
53 p_r = NULL;
54 } else {
55 p_r->ref_count = 1;
56 p_r->flags = 0;
57 p_r->start = paddr_start / PAGE_SIZE;
58 p_r->len = n_pages;
61 return p_r;
64 static struct p_region *p_r_create_fixed(size_t n_pages, uintptr_t physical)
66 struct p_region *p_r;
68 p_r = malloc(sizeof *p_r);
69 if (!p_r){
70 TRACE("p_r_create_fixed: memory full");
71 } else {
72 p_r->ref_count = 1;
73 p_r->flags = 0;
74 p_r->start = physical / PAGE_SIZE;
75 p_r->len = n_pages;
77 return p_r;
80 static void p_region_addref(struct p_region *p_r)
82 // TODO: bounds check
83 p_r->ref_count++;
86 static void p_region_release(struct p_region *p_r)
88 p_r->ref_count--;
89 if (p_r->ref_count == 0){
90 // no longer used - free it
91 free_pages(p_r->start * PAGE_SIZE, p_r->len);
92 free(p_r);
96 // create a vm_region and insert it into the address space
97 // (does not deal with corresponding p_region)
98 // TODO: check for overlapping regions? (which wouldn't make sense)
99 static struct vm_region *add_vm_region(struct addr_space *aspace, uintptr_t virtual, size_t n_pages)
101 struct vm_region *vm_r;
102 vm_r = malloc(sizeof *vm_r);
103 if (!vm_r){
104 TRACE("add_vm_region: memory full");
105 return NULL;
107 vm_r->start = virtual / PAGE_SIZE;
108 vm_r->len = n_pages;
109 vm_r->flags = 0;
110 // linked-list prepend
111 vm_r->prev = NULL;
112 vm_r->next = aspace->regions;
113 if (aspace->regions){
114 aspace->regions->prev = vm_r;
116 aspace->regions = vm_r;
117 return vm_r;
120 struct addr_space *addr_space_new(void)
122 struct addr_space *aspace;
123 aspace = malloc(sizeof *aspace);
124 if (!aspace){
125 TRACE("new_addr_space: memory full");
126 } else {
127 aspace->regions = NULL;
128 pagedir_create(&aspace->pagedir);
130 // First address space becomes the current one.
131 // XXX: This doesn't seem ... right
132 if (!current_addr_space){
133 current_addr_space = aspace;
136 return aspace;
139 void addr_space_delete(struct addr_space *aspace)
141 // delete and remove each region
142 struct vm_region *vm_r, *vm_r_next;
143 vm_r = aspace->regions;
144 while (vm_r){
145 vm_r_next = vm_r->next;
146 if (vm_r->p_region){
147 p_region_release(vm_r->p_region);
149 free(vm_r);
150 vm_r = vm_r_next;
152 pagedir_destroy(&aspace->pagedir);
153 // free the addr_space struct
154 free(aspace);
157 struct vm_region *vm_region_new_physical(struct addr_space *aspace,
158 uintptr_t virtual, size_t n_pages)
160 struct vm_region *vm_r;
161 struct p_region *p_r;
163 vm_r = add_vm_region(aspace, virtual, n_pages);
164 if (!vm_r){
165 return NULL;
167 p_r = p_r_create(n_pages);
168 if (!p_r){
169 // TODO: remove virtual region!!
170 return NULL;
172 vm_r->p_region = p_r;
173 return vm_r;
176 struct vm_region *vm_region_new_physical_fixed(struct addr_space *aspace,
177 uintptr_t virtual, size_t n_pages, uintptr_t physical)
179 struct vm_region *vm_r;
180 struct p_region *p_r;
182 vm_r = add_vm_region(aspace, virtual, n_pages);
183 if (!vm_r){
184 return NULL;
186 p_r = p_r_create_fixed(n_pages, physical);
187 if (!p_r){
188 // TODO: remove virtual region!!
189 return NULL;
191 vm_r->p_region = p_r;
192 return vm_r;
195 struct vm_region *vm_region_new_physical_shared(struct addr_space *aspace,
196 uintptr_t virtual, size_t n_pages)
198 struct vm_region *vm_r;
199 vm_r = vm_region_new_physical(aspace, virtual, n_pages);
200 if (vm_r){
201 vm_r->p_region->flags |= P_R_SHARED;
203 return vm_r;
206 // set and clear region flags
207 // TODO: update page tables!
209 void vm_region_set_flags(struct vm_region *vm_r, int flags)
211 vm_r->flags |= flags;
214 void vm_region_clear_flags(struct vm_region *vm_r, int flags)
216 vm_r->flags &= ~flags;
219 void vm_region_remove(struct addr_space *aspace, struct vm_region *vm_r)
221 // linked-list remove
222 if (vm_r->prev){
223 vm_r->prev->next = vm_r->next;
224 } else {
225 aspace->regions = vm_r->next;
227 if (vm_r->next){
228 vm_r->next->prev = vm_r->prev;
231 // release the associated physical memory
232 if (vm_r->p_region){
233 p_region_release(vm_r->p_region);
235 // free the struct
236 free(vm_r);
239 struct vm_region *vm_region_dup(struct addr_space *new_addr_space, struct vm_region *vm_r, uintptr_t virtual)
241 struct vm_region *new_vm_r;
243 new_vm_r = add_vm_region(new_addr_space, virtual, vm_r->len);
244 if (!new_vm_r){
245 return NULL;
247 new_vm_r->p_region = vm_r->p_region;
248 new_vm_r->flags = vm_r->flags;
249 p_region_addref(new_vm_r->p_region);
250 return new_vm_r;
253 // find vm_region which contains address 'virtual'
254 static struct vm_region *vm_region_find(struct addr_space *aspace, uintptr_t virtual)
256 struct vm_region *vm_r;
257 // linear search!
258 vm_r = aspace->regions;
259 while (vm_r){
260 if (vm_r->start <= (virtual / PAGE_SIZE)
261 && (vm_r->start + vm_r->len) > (virtual / PAGE_SIZE)){
262 return vm_r;
263 } else {
264 vm_r = vm_r->next;
267 // no such luck
268 return NULL;
271 void addr_space_switch(struct addr_space *new_addr_space)
273 current_addr_space = new_addr_space;
274 pagedir_switch(&new_addr_space->pagedir);
/*
 * Page fault handler (installed on vector 0xE by
 * set_pagefault_handler). Demand-maps the faulting region's pages,
 * and panics on faults that cannot be satisfied: access outside any
 * region, writes to read-only regions, reserved-bit violations, and
 * (not yet implemented) copy-on-write faults.
 */
void pagefault_handler(int vector, struct interrupt_stack *is)
{
    struct vm_region *vm_r;
    struct p_region *p_r;
    uintptr_t fault_addr, error_code;

    fault_addr = get_cr2();           // CR2 holds the faulting linear address
    error_code = is->error_code;      // x86 page-fault error code bits

    // A fault before any address space exists would be a kernel bug.
    assert(current_addr_space != NULL);
    vm_r = vm_region_find(current_addr_space, fault_addr);
    if (!vm_r){
        // No region covers this address: genuine bad access.
        panic("Page fault at address 0x%.8X - nothing there\n",
                fault_addr);
    } else {
        p_r = vm_r->p_region;
        // NOTE(review): p_r is dereferenced unconditionally here,
        // although other paths treat a NULL p_region as legal.
        assert(p_r->len == vm_r->len); // sanity check
        if (error_code & 0x1){
            // Bit 0 set: page fault caused by page-level protection
            // violation, i.e. the page was there, but permissions
            // were wrong.
            if (!(vm_r->flags & VM_R_WRITABLE)){
                panic("Page fault at address 0x%.8X - tried to write to read-only region",
                        fault_addr);
            } else {
                // Writable region mapped read-only: this is the
                // copy-on-write case deferred below.
                panic("COW NOT IMPLEMENTED");
            }
        //} else if (!(error_code & 0x4)){
        // page fault caused by kernel
        } else if (error_code & 0x8){
            // Bit 3: reserved bit set in a paging structure.
            panic("Page fault caused by reserved bit violation");
        } else {
            // page not present - time to map it
            int map_flags;
            map_flags = PTE_PRESENT | PTE_USER;
            // Set the writable flag in the page table, UNLESS the
            // region is unshared and referenced by more than one
            // vm_region: then keep it read-only so the write fault
            // above fires and copy-on-write can (eventually) be
            // handled.
            if ((vm_r->flags & VM_R_WRITABLE) &&
                !(p_r->ref_count > 1 && !(p_r->flags & P_R_SHARED))){
                map_flags |= PTE_WRITABLE;
            }
            // Map the entire region, not just the faulting page.
            map_mem(p_r->start * PAGE_SIZE, // physical
                vm_r->start * PAGE_SIZE, // virtual
                vm_r->len, // n_pages
                map_flags); // flags
        }
    }
}
// Install pagefault_handler on interrupt vector 0xE
// (the x86 page-fault exception).
void set_pagefault_handler(void)
{
    set_interrupt(0xE, pagefault_handler);
}