/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
5 #include <kernel/kernel.h>
7 #include <kernel/vm_cache.h>
8 #include <kernel/vm_page.h>
9 #include <kernel/heap.h>
10 #include <kernel/lock.h>
11 #include <kernel/debug.h>
12 #include <kernel/lock.h>
13 #include <kernel/arch/cpu.h>
15 vm_cache
*vm_cache_create(vm_store
*store
)
19 cache
= kmalloc(sizeof(vm_cache
));
23 cache
->page_list
= NULL
;
32 vm_cache_ref
*vm_cache_ref_create(vm_cache
*cache
)
36 ref
= kmalloc(sizeof(vm_cache_ref
));
41 mutex_init(&ref
->lock
, "cache_ref_mutex");
42 ref
->region_list
= NULL
;
49 void vm_cache_acquire_ref(vm_cache_ref
*cache_ref
)
52 panic("vm_cache_acquire_ref: passed NULL\n");
53 atomic_add(&cache_ref
->ref_count
, 1);
56 void vm_cache_release_ref(vm_cache_ref
*cache_ref
)
61 panic("vm_cache_release_ref: passed NULL\n");
62 if(atomic_add(&cache_ref
->ref_count
, -1) == 1) {
64 // delete the cache's backing store, if it has one
65 if(cache_ref
->cache
->store
)
66 (*cache_ref
->cache
->store
->ops
->destroy
)(cache_ref
->cache
->store
);
68 // free all of the pages in the cache
69 page
= cache_ref
->cache
->page_list
;
71 vm_page
*old_page
= page
;
72 page
= page
->cache_next
;
73 dprintf("vm_cache_release_ref: freeing page 0x%x\n", old_page
->ppn
);
74 vm_page_set_state(old_page
, PAGE_STATE_FREE
);
77 mutex_destroy(&cache_ref
->lock
);
78 kfree(cache_ref
->cache
);
83 vm_page
*vm_cache_lookup_page(vm_cache_ref
*cache_ref
, off_t offset
)
87 for(page
= cache_ref
->cache
->page_list
; page
!= NULL
; page
= page
->cache_next
) {
88 if(page
->offset
== offset
) {
96 void vm_cache_insert_page(vm_cache_ref
*cache_ref
, vm_page
*page
, off_t offset
)
98 page
->offset
= offset
;
100 if(cache_ref
->cache
->page_list
!= NULL
) {
101 cache_ref
->cache
->page_list
->cache_prev
= page
;
103 page
->cache_next
= cache_ref
->cache
->page_list
;
104 page
->cache_prev
= NULL
;
105 cache_ref
->cache
->page_list
= page
;
107 page
->cache_ref
= cache_ref
;
110 void vm_cache_remove_page(vm_cache_ref
*cache_ref
, vm_page
*page
)
112 if(cache_ref
->cache
->page_list
== page
) {
113 if(page
->cache_next
!= NULL
)
114 page
->cache_next
->cache_prev
= NULL
;
115 cache_ref
->cache
->page_list
= page
->cache_next
;
117 if(page
->cache_prev
!= NULL
)
118 page
->cache_prev
->cache_next
= page
->cache_next
;
119 if(page
->cache_next
!= NULL
)
120 page
->cache_next
->cache_prev
= page
->cache_next
;
122 page
->cache_ref
= NULL
;
125 int vm_cache_insert_region(vm_cache_ref
*cache_ref
, vm_region
*region
)
127 mutex_lock(&cache_ref
->lock
);
129 region
->cache_next
= cache_ref
->region_list
;
130 if(region
->cache_next
)
131 region
->cache_next
->cache_prev
= region
;
132 region
->cache_prev
= NULL
;
133 cache_ref
->region_list
= region
;
135 mutex_unlock(&cache_ref
->lock
);
138 int vm_cache_remove_region(vm_cache_ref
*cache_ref
, vm_region
*region
)
140 mutex_lock(&cache_ref
->lock
);
142 if(region
->cache_prev
)
143 region
->cache_prev
->cache_next
= region
->cache_next
;
144 if(region
->cache_next
)
145 region
->cache_next
->cache_prev
= region
->cache_prev
;
146 if(cache_ref
->region_list
== region
)
147 cache_ref
->region_list
= region
->cache_next
;
149 mutex_unlock(&cache_ref
->lock
);