/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
 */

#ifndef _SYS_BUF2_H_
#define _SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>	/* curthread */
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif

/*
 * Initialize a lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, 0)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}

/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}

/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
static __inline void
BUF_UNLOCK(struct buf *bp)
{
	lockmgr(&(bp)->b_lock, LK_RELEASE);
}

/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	lockmgr_kernproc(&(bp)->b_lock);
}
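
/*
 * Example (sketch, not from the original header; vp and bp are the
 * caller's vnode and buffer): a typical asynchronous write hands the
 * lock to the kernel before issuing the I/O, so that biodone() can
 * release it on completion:
 *
 *	BUF_LOCK(bp, LK_EXCLUSIVE);
 *	BUF_KERNPROC(bp);
 *	vn_strategy(vp, &bp->b_bio1);
 */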

/*
 * Find out the number of references to a lock.
 *
 * The non-blocking version should only be used for assertions in cases
 * where the buffer is expected to be owned or otherwise data stable.
 */
static __inline int
BUF_REFCNT(struct buf *bp)
{
	return (lockcount(&(bp)->b_lock));
}

static __inline int
BUF_REFCNTNB(struct buf *bp)
{
	return (lockcountnb(&(bp)->b_lock));
}

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp)			\
	if (BUF_REFCNTNB(bp) > 0)		\
		panic("free locked buf")
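
/*
 * bio queue helpers: simple FIFO operations on the TAILQ embedded in
 * struct bio_queue_head.
 */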
static __inline void
bioq_init(struct bio_queue_head *bioq)
{
	TAILQ_INIT(&bioq->queue);
	bioq->off_unused = 0;
	bioq->reorder = 0;
	bioq->transition = NULL;
	bioq->bio_unused = NULL;
}

static __inline void
bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
{
	bioq->transition = NULL;
	TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
}

static __inline void
bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * Adjust read insertion point when removing the bioq.  The
	 * bio after the insert point is a write so move backwards
	 * one (NULL will indicate all the reads have cleared).
	 */
	if (bio == bioq->transition)
		bioq->transition = TAILQ_NEXT(bio, bio_act);
	TAILQ_REMOVE(&bioq->queue, bio, bio_act);
}

static __inline struct bio *
bioq_first(struct bio_queue_head *bioq)
{
	return (TAILQ_FIRST(&bioq->queue));
}

static __inline struct bio *
bioq_takefirst(struct bio_queue_head *bioq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bioq->queue);
	if (bp != NULL)
		bioq_remove(bioq, bp);
	return (bp);
}
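
/*
 * Example (sketch, not from the original header; sc_bioq is a
 * hypothetical per-device queue): a strategy routine appends incoming
 * BIOs and a worker thread drains them:
 *
 *	bioq_insert_tail(&sc->sc_bioq, bio);
 *	...
 *	while ((bio = bioq_takefirst(&sc->sc_bioq)) != NULL)
 *		... process the bio, then biodone(bio) ...
 */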

/*
 * Adjust buffer cache buffer's activity count.  This
 * works similarly to vm_page->act_count.
 */
static __inline void
buf_act_advance(struct buf *bp)
{
	if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
		bp->b_act_count = ACT_MAX;
	else
		bp->b_act_count += ACT_ADVANCE;
}

static __inline void
buf_act_decline(struct buf *bp)
{
	if (bp->b_act_count < ACT_DECLINE)
		bp->b_act_count = 0;
	else
		bp->b_act_count -= ACT_DECLINE;
}

/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE.
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}
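
/*
 * A dependency-tracking filesystem installs its own bio_ops table to
 * receive the callbacks below; for example HAMMER points b_ops at its
 * hammer_bioops.  Sketch (not from the original header):
 *
 *	bp->b_ops = &hammer_bioops;
 */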

/*
 * Precondition: the buffer has some dependencies.
 *
 * MPSAFE
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(!LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops)
		r = ops->io_countdeps(bp, n);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

/*
 * MPSAFE
 */
static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkwrite(bp));
	return(0);
}

/*
 * Chained biodone.  The bio callback was made and the callback function
 * wishes to chain the biodone.  If no BIOs are left we call bpdone()
 * with elseit=TRUE (asynchronous completion).
 *
 * MPSAFE
 */
static __inline void
biodone_chain(struct bio *bio)
{
	if (bio->bio_prev)
		biodone(bio->bio_prev);
	else
		bpdone(bio->bio_buf, 1);
}
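
/*
 * Convenience read wrappers: bread() and breadn() resolve to
 * breadnx(), and cluster_read() to cluster_readx().  Each pre-NULLs
 * *bpp before calling the underlying routine.
 */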
static __inline int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, NULL, NULL, 0, bpp));
}

static __inline int
breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
       int *rabsize, int cnt, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, raoffset, rabsize, cnt, bpp));
}

static __inline int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize, minreq,
			     maxreq, bpp));
}
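
/*
 * Usage sketch (hypothetical caller, assuming the usual bread/brelse
 * protocol): on success the buffer is returned locked and must be
 * released by the caller:
 *
 *	struct buf *bp;
 *
 *	if (bread(vp, loffset, blksize, &bp) == 0) {
 *		... consume bp->b_data ...
 *		brelse(bp);
 *	}
 */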

#endif	/* _KERNEL */

#endif	/* !_SYS_BUF2_H_ */