/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/heap.h>
#include <kernel/debug.h>
#include <kernel/lock.h>
#include <kernel/vm_store_device.h>
struct device_store_data {
	// physical base address of the device memory backing this store
	addr base_addr;
};
static void device_destroy(struct vm_store *store)
{
	if(store)
		kfree(store);
}
static off_t device_commit(struct vm_store *store, off_t size)
{
	// device memory is already backed by hardware, so just record the size
	store->committed_size = size;
	return size;
}
static int device_has_page(struct vm_store *store, off_t offset)
{
	// this should never be called
	return 0;
}
static int device_read(struct vm_store *store, off_t offset, void *buf, size_t *len)
{
	panic("device_store: read called. Invalid!\n");
	return ERR_UNIMPLEMENTED;
}
static int device_write(struct vm_store *store, off_t offset, const void *buf, size_t *len)
{
	panic("device_store: write called. Invalid!\n");
	return ERR_UNIMPLEMENTED;
}
// this fault handler should take over the page fault routine and map the page in
//
// setup: the cache that this store is part of has a ref being held and will be
// released after this handler is done
static int device_fault(struct vm_store *store, struct vm_address_space *aspace, off_t offset)
{
	struct device_store_data *d = (struct device_store_data *)store->data;
	vm_cache_ref *cache_ref = store->cache->ref;
	vm_region *region;
	dprintf("device_fault: offset 0x%x + base_addr 0x%x\n", offset, d->base_addr);
	// figure out which page needs to be mapped where
	(*aspace->translation_map.ops->lock)(&aspace->translation_map);
	mutex_lock(&cache_ref->lock);
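	// lock ordering: the translation map lock is taken first, then the
	// cache_ref lock; the unlock path below releases them in reverse order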
	// cycle through all of the regions that map this cache and map the page in
	for(region = cache_ref->region_list; region != NULL; region = region->cache_next) {
		// make sure this page in the cache that was faulted on is covered in this region
		if(offset >= region->cache_offset && (offset - region->cache_offset) < region->size) {
			dprintf("device_fault: mapping paddr 0x%x to vaddr 0x%x\n",
				(addr)(d->base_addr + offset),
				(addr)(region->base + (offset - region->cache_offset)));
			(*aspace->translation_map.ops->map)(&aspace->translation_map,
				region->base + (offset - region->cache_offset),
				d->base_addr + offset, region->lock);
		}
	}
	mutex_unlock(&cache_ref->lock);
	(*aspace->translation_map.ops->unlock)(&aspace->translation_map);

	dprintf("device_fault: done\n");

	return 0;
}
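/*
** Worked example of the arithmetic above (addresses illustrative, not taken
** from this file): with base_addr 0xe0000000, a fault at cache offset 0x2000
** in a region based at 0x80100000 with cache_offset 0 maps physical page
** 0xe0002000 at virtual address 0x80102000.
*/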
static vm_store_ops device_ops = {
	&device_destroy,
	&device_commit,
	&device_has_page,
	&device_read,
	&device_write,
	&device_fault
};
vm_store *vm_store_create_device(addr base_addr)
{
	vm_store *store;
	struct device_store_data *d;

	store = kmalloc(sizeof(vm_store) + sizeof(struct device_store_data));
	if(store == NULL)
		return NULL;

	store->ops = &device_ops;
	store->cache = NULL;
	// the private data lives directly after the vm_store in the same allocation
	store->data = (void *)((addr)store + sizeof(vm_store));
	store->committed_size = 0;

	d = (struct device_store_data *)store->data;
	d->base_addr = base_addr;

	return store;
}
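/*
** Usage sketch (illustrative, not part of the original file): a device store
** backs regions created over memory-mapped hardware such as a framebuffer.
** The helper and the physical address below are hypothetical; in NewOS the
** store is created from the physical-memory mapping path in vm.c.
*/
#if 0
static vm_store *example_framebuffer_store(void)
{
	// hypothetical physical base address of a framebuffer
	addr fb_phys_base = 0xe0000000;

	// pages are never read from a backing file; device_fault() maps
	// fb_phys_base + offset directly into each region over the cache
	return vm_store_create_device(fb_phys_base);
}
#endif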