/*
** kernel/vm/vm_store_device.c
** (extraction residue preserved from gitweb view: commit message
** "Finished device vm_store. Now areas created w/vm_map_physical_memory",
** blob fa6f36d27b68929f21db06dfa73f06d32d964b19)
*/
1 /*
2 ** Copyright 2001, Travis Geiselbrecht. All rights reserved.
3 ** Distributed under the terms of the NewOS License.
4 */
5 #include <kernel/kernel.h>
6 #include <kernel/vm.h>
7 #include <kernel/heap.h>
8 #include <kernel/debug.h>
9 #include <kernel/lock.h>
10 #include <kernel/vm_store_device.h>
12 struct device_store_data {
13 addr base_addr;
// Tear down the store. The private device_store_data lives in the same
// kmalloc block as the vm_store header, so a single kfree releases both.
static void device_destroy(struct vm_store *store)
{
	// guard kept: kernel kfree may not tolerate NULL — TODO confirm
	if(store) {
		kfree(store);
	}
}
// Commit hook. Device stores never consume swap or anonymous memory, so
// there is nothing to reserve; report zero committed bytes.
static off_t device_commit(struct vm_store *store, off_t size)
{
	return 0;
}
// Page-presence query. Device stores are mapped directly in device_fault,
// so the generic paging path should never ask this question.
static int device_has_page(struct vm_store *store, off_t offset)
{
	// this should never be called
	return 0;
}
// Read hook. Device memory is mapped directly at fault time; the generic
// read path must never reach a device store, so this is a hard error.
static int device_read(struct vm_store *store, off_t offset, void *buf, size_t *len)
{
	panic("device_store: read called. Invalid!\n");
	return -1;	// not reached; panic does not return
}
// Write hook. Same contract as device_read: writes go straight to the
// mapped device memory, never through the store, so this is a hard error.
static int device_write(struct vm_store *store, off_t offset, const void *buf, size_t *len)
{
	panic("device_store: write called. Invalid!\n");
	return -1;	// not reached; panic does not return
}
44 // this fault handler should take over the page fault routine and map the page in
46 // setup: the cache that this store is part of has a ref being held and will be
47 // released after this handler is done
48 static int device_fault(struct vm_store *store, struct vm_address_space *aspace, off_t offset)
50 struct device_store_data *d = (struct device_store_data *)store->data;
51 vm_cache_ref *cache_ref = store->cache->ref;
52 vm_region *region;
54 dprintf("device_fault: offset 0x%d + base_addr 0x%x\n", offset, d->base_addr);
56 // figure out which page needs to be mapped where
57 (*aspace->translation_map.ops->lock)(&aspace->translation_map);
58 mutex_lock(&cache_ref->lock);
60 // cycle through all of the regions that map this cache and map the page in
61 for(region = cache_ref->region_list; region != NULL; region = region->cache_next) {
62 // make sure this page in the cache that was faulted on is covered in this region
63 if(offset >= region->cache_offset && (offset - region->cache_offset) < region->size) {
64 dprintf("device_fault: mapping paddr 0x%x to vaddr 0x%x\n",
65 (addr)(d->base_addr + offset),
66 (addr)(region->base + (offset - region->cache_offset)));
67 (*aspace->translation_map.ops->map)(&aspace->translation_map,
68 region->base + (offset - region->cache_offset),
69 d->base_addr + offset, region->lock);
73 mutex_unlock(&cache_ref->lock);
74 (*aspace->translation_map.ops->unlock)(&aspace->translation_map);
76 dprintf("device_fault: done\n");
78 return 0;
81 static vm_store_ops device_ops = {
82 &device_destroy,
83 &device_commit,
84 &device_has_page,
85 &device_read,
86 &device_write,
87 &device_fault
90 vm_store *vm_store_create_device(addr base_addr)
92 vm_store *store;
93 struct device_store_data *d;
95 store = kmalloc(sizeof(vm_store) + sizeof(struct device_store_data));
96 if(store == NULL)
97 return NULL;
99 store->ops = &device_ops;
100 store->cache = NULL;
101 store->data = (void *)((addr)store + sizeof(vm_store));
103 d = (struct device_store_data *)store->data;
104 d->base_addr = base_addr;
106 return store;