/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
 */

#ifndef _SYS_BUF2_H_
#define _SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>		/* crit_*() functions */
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>	/* curthread */
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif

/*
 * Initialize a lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, 0)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}

/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}

/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
static __inline void
BUF_UNLOCK(struct buf *bp)
{
	lockmgr(&(bp)->b_lock, LK_RELEASE);
}

/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	lockmgr_kernproc(&(bp)->b_lock);
}
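
/*
 * Usage sketch (illustrative only): how BUF_LOCK, BUF_KERNPROC and
 * BUF_UNLOCK typically combine for synchronous versus asynchronous
 * access.  start_async_io() is a hypothetical stand-in for whatever
 * routine actually issues the I/O; only the BUF_* inlines come from
 * this file.
 *
 *	Synchronous access, caller keeps ownership:
 *		if (BUF_LOCK(bp, LK_EXCLUSIVE) == 0) {
 *			... touch bp->b_data ...
 *			BUF_UNLOCK(bp);
 *		}
 *
 *	Asynchronous I/O, ownership handed to the kernel:
 *		if (BUF_LOCK(bp, LK_EXCLUSIVE) == 0) {
 *			BUF_KERNPROC(bp);	(biodone releases the lock)
 *			start_async_io(bp);
 *		}
 */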

/*
 * Find out the number of references to a lock.
 *
 * The non-blocking version should only be used for assertions in cases
 * where the buffer is expected to be owned or otherwise data stable.
 */
static __inline int
BUF_REFCNT(struct buf *bp)
{
	return (lockcount(&(bp)->b_lock));
}

static __inline int
BUF_REFCNTNB(struct buf *bp)
{
	return (lockcountnb(&(bp)->b_lock));
}

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp)			\
	if (BUF_REFCNTNB(bp) > 0)		\
		panic("free locked buf")

static __inline void
bioq_init(struct bio_queue_head *head)
{
	TAILQ_INIT(&head->queue);
	head->last_offset = 0;
	head->insert_point = NULL;
	head->switch_point = NULL;
}

static __inline void
bioq_insert_tail(struct bio_queue_head *head, struct bio *bio)
{
	if ((bio->bio_buf->b_flags & B_ORDERED) != 0) {
		head->insert_point = bio;
		head->switch_point = NULL;
	}
	TAILQ_INSERT_TAIL(&head->queue, bio, bio_act);
}

static __inline void
bioq_remove(struct bio_queue_head *head, struct bio *bio)
{
	if (bio == head->switch_point)
		head->switch_point = TAILQ_NEXT(bio, bio_act);
	if (bio == head->insert_point) {
		head->insert_point = TAILQ_PREV(bio, bio_queue, bio_act);
		if (head->insert_point == NULL)
			head->last_offset = 0;
	} else if (bio == TAILQ_FIRST(&head->queue))
		head->last_offset = bio->bio_offset;
	TAILQ_REMOVE(&head->queue, bio, bio_act);
	if (TAILQ_FIRST(&head->queue) == head->switch_point)
		head->switch_point = NULL;
}

static __inline struct bio *
bioq_first(struct bio_queue_head *head)
{
	return (TAILQ_FIRST(&head->queue));
}
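
/*
 * Usage sketch (illustrative only): a disk driver's strategy/start path
 * using the bioq inlines above.  The per-device softc field sc->bio_queue
 * is assumed here, not defined in this file.
 *
 *	bioq_init(&sc->bio_queue);			at attach time
 *
 *	bioq_insert_tail(&sc->bio_queue, bio);		in the strategy routine
 *
 *	bio = bioq_first(&sc->bio_queue);		when starting the next I/O
 *	if (bio != NULL)
 *		bioq_remove(&sc->bio_queue, bio);
 */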

/*
 * biodeps inlines - used by softupdates and HAMMER.
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}

/*
 * Precondition: the buffer has some dependencies.
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(!LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}

static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops)
		r = ops->io_countdeps(bp, n);
	else
		r = 0;
	return(r);
}

static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkwrite(bp));
	return(0);
}
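
/*
 * Usage sketch (illustrative only): a filesystem that tracks dependencies
 * points b_ops at its own bio_ops table so the wrappers above dispatch to
 * it.  myfs_bioops and the myfs_io_*() callbacks are hypothetical names;
 * the field names match the io_* hooks invoked by the inlines in this file.
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_start	= myfs_io_start,
 *		.io_complete	= myfs_io_complete,
 *		.io_deallocate	= myfs_io_deallocate,
 *		.io_movedeps	= myfs_io_movedeps,
 *	};
 *
 *	bp->b_ops = &myfs_bioops;	(set before queueing the buffer)
 *	buf_start(bp);			(calls myfs_io_start(bp))
 */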

#endif	/* _KERNEL */

#endif	/* !_SYS_BUF2_H_ */