unleashed.git: include/vm/object.h
/*
 * Copyright 2017 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _VM_OBJECT_H
#define _VM_OBJECT_H

#include <sys/list.h>
#include <sys/avl.h>

#ifdef __cplusplus
extern "C" {
#endif

struct vnode;

struct vmobject {
	/*
	 * We keep all the pages in an AVL tree indexed by the offset.  This
	 * allows us to do quick offset lookups.
	 *
	 * We also keep all the pages on a list, which we use during
	 * eviction.
	 */
	avl_tree_t tree;
	list_t list;

	kmutex_t lock;

	struct vnode *vnode;	/* the owner */
};

#if defined(_KERNEL)
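/* Thin wrappers around the list_t operations on the object's page list. */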
#define vmobject_add_page_head(o,p)	list_insert_head(&(o)->list, (p))
#define vmobject_add_page_tail(o,p)	list_insert_tail(&(o)->list, (p))
#define vmobject_remove_page(o,p)	list_remove(&(o)->list, (p))
#define vmobject_get_head(o)		list_head(&(o)->list)
#define vmobject_get_tail(o)		list_tail(&(o)->list)
#define vmobject_get_prev(o,p)		list_prev(&(o)->list, (p))
#define vmobject_get_next(o,p)		list_next(&(o)->list, (p))
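
/*
 * Return the page preceding 'page' on the object's list, wrapping around
 * to the tail when 'page' is the head.
 */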
static inline struct page *
vmobject_get_prev_loop(struct vmobject *obj, struct page *page)
{
	struct page *p;

	p = vmobject_get_prev(obj, page);
	if (p == NULL)
		p = vmobject_get_tail(obj);

	return (p);
}
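
/*
 * Return the page following 'page' on the object's list, wrapping around
 * to the head when 'page' is the tail.
 */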
static inline struct page *
vmobject_get_next_loop(struct vmobject *obj, struct page *page)
{
	struct page *p;

	p = vmobject_get_next(obj, page);
	if (p == NULL)
		p = vmobject_get_head(obj);

	return (p);
}
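
/* Move 'page' to the tail of the object's page list. */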
static inline void
vmobject_move_page_tail(struct vmobject *obj, struct page *page)
{
	vmobject_remove_page(obj, page);
	vmobject_add_page_tail(obj, page);
}

#define VMOBJECT_LOCKED(obj)	MUTEX_HELD(&(obj)->lock)
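
/* Helpers for acquiring and releasing the per-object mutex. */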
static inline void
vmobject_lock(struct vmobject *obj)
{
	mutex_enter(&obj->lock);
}

static inline int
vmobject_trylock(struct vmobject *obj)
{
	return mutex_tryenter(&obj->lock);
}

static inline void
vmobject_unlock(struct vmobject *obj)
{
	mutex_exit(&obj->lock);
}

extern void vmobject_init(struct vmobject *obj, struct vnode *vnode);
extern void vmobject_fini(struct vmobject *obj);

#endif	/* _KERNEL */

#ifdef __cplusplus
}
#endif

#endif	/* _VM_OBJECT_H */