2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)buf.h 8.9 (Berkeley) 3/30/95
39 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
40 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
49 #include <sys/buf.h> /* crit_*() functions */
51 #ifndef _SYS_GLOBALDATA_H_
52 #include <sys/globaldata.h> /* curthread */
54 #ifndef _SYS_THREAD2_H_
55 #include <sys/thread2.h> /* crit_*() functions */
57 #ifndef _SYS_SPINLOCK2_H_
58 #include <sys/spinlock2.h> /* crit_*() functions */
61 #include <sys/mount.h>
64 #include <sys/vnode.h>
66 #ifndef _VM_VM_PAGE_H_
67 #include <vm/vm_page.h>
/*
 * Initialize a buffer's embedded lock, using the system-wide buffer
 * wait message.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, 0)
78 * Get a lock sleeping non-interruptably until it becomes available.
80 * XXX lk_wmesg can race, but should not result in any operational issues.
83 BUF_LOCK(struct buf
*bp
, int locktype
)
85 bp
->b_lock
.lk_wmesg
= buf_wmesg
;
86 return (lockmgr(&(bp
)->b_lock
, locktype
));
89 * Get a lock sleeping with specified interruptably and timeout.
91 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
92 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
93 * will not be set in that case.
95 * XXX lk_wmesg can race, but should not result in any operational issues.
98 BUF_TIMELOCK(struct buf
*bp
, int locktype
, char *wmesg
, int timo
)
100 bp
->b_lock
.lk_wmesg
= wmesg
;
101 bp
->b_lock
.lk_timo
= timo
;
102 return (lockmgr(&(bp
)->b_lock
, locktype
| LK_TIMELOCK
));
105 * Release a lock. Only the acquiring process may free the lock unless
106 * it has been handed off to biodone.
109 BUF_UNLOCK(struct buf
*bp
)
111 lockmgr(&(bp
)->b_lock
, LK_RELEASE
);
115 * When initiating asynchronous I/O, change ownership of the lock to the
116 * kernel. Once done, the lock may legally released by biodone. The
117 * original owning process can no longer acquire it recursively, but must
118 * wait until the I/O is completed and the lock has been freed by biodone.
121 BUF_KERNPROC(struct buf
*bp
)
123 lockmgr_kernproc(&(bp
)->b_lock
);
126 * Find out the number of references to a lock.
128 * The non-blocking version should only be used for assertions in cases
129 * where the buffer is expected to be owned or otherwise data stable.
132 BUF_REFCNT(struct buf
*bp
)
134 return (lockcount(&(bp
)->b_lock
));
138 BUF_REFCNTNB(struct buf
*bp
)
140 return (lockcountnb(&(bp
)->b_lock
));
/*
 * Free a buffer lock.  It is a bug to free a buffer that is still
 * referenced, so panic if any references remain.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and binds correctly inside unbraced if/else bodies
 * (the bare "if" form mis-parses in "if (x) BUF_LOCKFREE(bp); else").
 */
#define BUF_LOCKFREE(bp)			\
	do {					\
		if (BUF_REFCNTNB(bp) > 0)	\
			panic("free locked buf"); \
	} while (0)
151 bioq_init(struct bio_queue_head
*bioq
)
153 TAILQ_INIT(&bioq
->queue
);
154 bioq
->off_unused
= 0;
156 bioq
->transition
= NULL
;
157 bioq
->bio_unused
= NULL
;
161 bioq_insert_tail(struct bio_queue_head
*bioq
, struct bio
*bio
)
163 bioq
->transition
= NULL
;
164 TAILQ_INSERT_TAIL(&bioq
->queue
, bio
, bio_act
);
168 bioq_remove(struct bio_queue_head
*bioq
, struct bio
*bio
)
171 * Adjust read insertion point when removing the bioq. The
172 * bio after the insert point is a write so move backwards
173 * one (NULL will indicate all the reads have cleared).
175 if (bio
== bioq
->transition
)
176 bioq
->transition
= TAILQ_NEXT(bio
, bio_act
);
177 TAILQ_REMOVE(&bioq
->queue
, bio
, bio_act
);
180 static __inline
struct bio
*
181 bioq_first(struct bio_queue_head
*bioq
)
183 return (TAILQ_FIRST(&bioq
->queue
));
187 * Adjust buffer cache buffer's activity count. This
188 * works similarly to vm_page->act_count.
191 buf_act_advance(struct buf
*bp
)
193 if (bp
->b_act_count
> ACT_MAX
- ACT_ADVANCE
)
194 bp
->b_act_count
= ACT_MAX
;
196 bp
->b_act_count
+= ACT_ADVANCE
;
200 buf_act_decline(struct buf
*bp
)
202 if (bp
->b_act_count
< ACT_DECLINE
)
205 bp
->b_act_count
-= ACT_DECLINE
;
209 * biodeps inlines - used by softupdates and HAMMER.
212 buf_dep_init(struct buf
*bp
)
215 LIST_INIT(&bp
->b_dep
);
219 * Precondition: the buffer has some dependencies.
222 buf_deallocate(struct buf
*bp
)
224 struct bio_ops
*ops
= bp
->b_ops
;
226 KKASSERT(! LIST_EMPTY(&bp
->b_dep
));
228 ops
->io_deallocate(bp
);
232 buf_countdeps(struct buf
*bp
, int n
)
234 struct bio_ops
*ops
= bp
->b_ops
;
238 r
= ops
->io_countdeps(bp
, n
);
245 buf_start(struct buf
*bp
)
247 struct bio_ops
*ops
= bp
->b_ops
;
254 buf_complete(struct buf
*bp
)
256 struct bio_ops
*ops
= bp
->b_ops
;
259 ops
->io_complete(bp
);
263 buf_fsync(struct vnode
*vp
)
265 struct bio_ops
*ops
= vp
->v_mount
->mnt_bioops
;
269 r
= ops
->io_fsync(vp
);
276 buf_movedeps(struct buf
*bp1
, struct buf
*bp2
)
278 struct bio_ops
*ops
= bp1
->b_ops
;
281 ops
->io_movedeps(bp1
, bp2
);
285 buf_checkread(struct buf
*bp
)
287 struct bio_ops
*ops
= bp
->b_ops
;
290 return(ops
->io_checkread(bp
));
295 buf_checkwrite(struct buf
*bp
)
297 struct bio_ops
*ops
= bp
->b_ops
;
300 return(ops
->io_checkwrite(bp
));
305 * Chained biodone. The bio callback was made and the callback function
306 * wishes to chain the biodone. If no BIO's are left we call bpdone()
307 * with elseit=TRUE (asynchronous completion).
310 biodone_chain(struct bio
*bio
)
313 biodone(bio
->bio_prev
);
315 bpdone(bio
->bio_buf
, 1);
320 #endif /* !_SYS_BUF2_H_ */