kernel/vm/vm_cache.c
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/vm_cache.h>
#include <kernel/vm_page.h>
#include <kernel/heap.h>
#include <kernel/lock.h>
#include <kernel/debug.h>
#include <kernel/arch/cpu.h>
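
// allocates a new cache object and links it to its backing store, if any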
vm_cache *vm_cache_create(vm_store *store)
{
	vm_cache *cache;

	cache = kmalloc(sizeof(vm_cache));
	if(cache == NULL)
		return NULL;

	cache->page_list = NULL;
	cache->ref = NULL;
	cache->store = store;
	if(store != NULL)
		store->cache = cache;

	return cache;
}
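
// allocates the ref object that carries the cache's mutex, region list,
// and reference count; ref_count starts at zero, so the first user is
// expected to take a reference with vm_cache_acquire_ref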
vm_cache_ref *vm_cache_ref_create(vm_cache *cache)
{
	vm_cache_ref *ref;

	ref = kmalloc(sizeof(vm_cache_ref));
	if(ref == NULL)
		return NULL;

	ref->cache = cache;
	mutex_init(&ref->lock, "cache_ref_mutex");
	ref->region_list = NULL;
	ref->ref_count = 0;
	cache->ref = ref;

	return ref;
}
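
// atomically takes one reference on the cache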
void vm_cache_acquire_ref(vm_cache_ref *cache_ref)
{
	if(cache_ref == NULL)
		panic("vm_cache_acquire_ref: passed NULL\n");
	atomic_add(&cache_ref->ref_count, 1);
}
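
// drops one reference; when the count falls to zero, the backing store
// is destroyed, every resident page is freed, and the cache and ref
// structures themselves are released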
void vm_cache_release_ref(vm_cache_ref *cache_ref)
{
	vm_page *page;

	if(cache_ref == NULL)
		panic("vm_cache_release_ref: passed NULL\n");
	if(atomic_add(&cache_ref->ref_count, -1) == 1) {
		// delete this cache

		// delete the cache's backing store, if it has one
		if(cache_ref->cache->store)
			(*cache_ref->cache->store->ops->destroy)(cache_ref->cache->store);

		// free all of the pages in the cache
		page = cache_ref->cache->page_list;
		while(page) {
			vm_page *old_page = page;
			page = page->cache_next;
			dprintf("vm_cache_release_ref: freeing page 0x%x\n", old_page->ppn);
			vm_page_set_state(old_page, PAGE_STATE_FREE);
		}

		mutex_destroy(&cache_ref->lock);
		kfree(cache_ref->cache);
		kfree(cache_ref);
	}
}
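
// walks the cache's page list looking for a page at the given offset;
// returns NULL if the page is not resident (the ref mutex is not taken
// here, so callers must serialize access themselves)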
vm_page *vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
{
	vm_page *page;

	for(page = cache_ref->cache->page_list; page != NULL; page = page->cache_next) {
		if(page->offset == offset) {
			return page;
		}
	}

	return NULL;
}
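
// stamps the page with its offset and pushes it onto the head of the
// cache's page list, pointing the page back at its owning cache_ref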
void vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
{
	page->offset = offset;

	if(cache_ref->cache->page_list != NULL) {
		cache_ref->cache->page_list->cache_prev = page;
	}
	page->cache_next = cache_ref->cache->page_list;
	page->cache_prev = NULL;
	cache_ref->cache->page_list = page;

	page->cache_ref = cache_ref;
}
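
// unlinks the page from the cache's doubly-linked page list and clears
// its back-pointer, handling the head-of-list case separately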
void vm_cache_remove_page(vm_cache_ref *cache_ref, vm_page *page)
{
	if(cache_ref->cache->page_list == page) {
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = NULL;
		cache_ref->cache->page_list = page->cache_next;
	} else {
		if(page->cache_prev != NULL)
			page->cache_prev->cache_next = page->cache_next;
		if(page->cache_next != NULL)
			page->cache_next->cache_prev = page->cache_prev;
	}
	page->cache_ref = NULL;
}
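
// links a region onto the head of the cache's region list under the
// ref mutex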
int vm_cache_insert_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	region->cache_next = cache_ref->region_list;
	if(region->cache_next)
		region->cache_next->cache_prev = region;
	region->cache_prev = NULL;
	cache_ref->region_list = region;

	mutex_unlock(&cache_ref->lock);

	return 0;
}
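
// unlinks a region from the cache's region list under the ref mutex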
int vm_cache_remove_region(vm_cache_ref *cache_ref, vm_region *region)
{
	mutex_lock(&cache_ref->lock);

	if(region->cache_prev)
		region->cache_prev->cache_next = region->cache_next;
	if(region->cache_next)
		region->cache_next->cache_prev = region->cache_prev;
	if(cache_ref->region_list == region)
		cache_ref->region_list = region->cache_next;

	mutex_unlock(&cache_ref->lock);

	return 0;
}
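
/*
** Illustrative sketch (not part of the original file): how the calls
** above fit together over a cache's lifetime. The function name and
** the assumption that the caller already holds a vm_store are
** hypothetical; only the vm_cache_* calls are defined in this file.
*/
#if 0
static void example_cache_lifetime(vm_store *store)
{
	vm_cache *cache;
	vm_cache_ref *cache_ref;

	// create the cache; vm_cache_create wires store->cache back to it
	cache = vm_cache_create(store);
	if(cache == NULL)
		return;

	// the ref object carries the mutex, region list and ref count
	cache_ref = vm_cache_ref_create(cache);
	if(cache_ref == NULL)
		return;

	// take the first reference; dropping the count back to zero in
	// vm_cache_release_ref destroys the store, frees all resident
	// pages, and frees both the cache and the ref
	vm_cache_acquire_ref(cache_ref);
	vm_cache_release_ref(cache_ref);
}
#endif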