/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * Copyright (c) 1994,1997 John S. Dyson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See the buf(9) manual page for more information.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <geom/geom.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include "opt_directio.h"
static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

struct buf_ops buf_ops_bio = {
	.bop_name	=	"buf_ops_bio",
	.bop_write	=	bufwrite,
	.bop_strategy	=	bufstrategy,
	.bop_bdflush	=	bufbdflush,
};
/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */

static struct proc *bufdaemonproc;

static int inmem(struct vnode *vp, daddr_t blkno);
static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
		vm_page_t m);
static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_setdirty_locked_object(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static int vfs_bio_clcheck(struct vnode *vp, int size,
		daddr_t lblkno, daddr_t blkno);
static int flushbufqueues(int, int);
static void buf_daemon(void);
static void bremfreel(struct buf *bp);
int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer I/O");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "KVA memory used for bufs");
static int maxbufspace;
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static int bufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static int maxbufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static int lobufspace;
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static int lorunningspace;
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static int hirunningspace;
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
int dirtybufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
    0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
    0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
int altbufferflushes;
SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
    0, "Number of fsync flushes to limit dirty buffers");
static int recursiveflushes;
SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
    0, "Number of flushes skipped due to being recursive");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
    0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
    "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
    "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
    "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");
/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idle.
 */
static int bd_request;

/*
 * This lock synchronizes access to bd_request.
 */
static struct mtx bdlock;

/*
 * bogus page -- for I/O to/from partially complete buffers.
 * This is a temporary solution to the problem, but it is not
 * really that bad.  It would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * This lock protects the runningbufreq and synchronizes runningbufwakeup and
 * waitrunningbufspace().
 */
static struct mtx rbreqlock;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
 */
static struct mtx nblock;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_CLEAN	1	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	2	/* B_DELWRI buffers */
#define QUEUE_DIRTY_GIANT 3	/* B_DELWRI buffers that need Giant */
#define QUEUE_EMPTYKVA	4	/* empty buffer headers with KVA assignment */
#define QUEUE_EMPTY	5	/* empty buffer headers */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };

/* Lock for the bufqueues */
static struct mtx bqlock;

/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referred from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
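
/*
 * Illustrative sketch (not part of the interface): waiters set one of the
 * VFS_BIO_NEED_* bits in needsbuffer while holding nblock and then sleep on
 * &needsbuffer; the wakeup side clears the bit and calls wakeup().  The
 * pattern used by bwillwrite() and getnewbuf() later in this file looks
 * roughly like the following (condition_still_holds is a placeholder):
 *
 *	mtx_lock(&nblock);
 *	while (condition_still_holds) {
 *		needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
 *		msleep(&needsbuffer, &nblock, PRIBIO + 4, "flswai", 0);
 *	}
 *	mtx_unlock(&nblock);
 */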
#ifdef DIRECTIO
extern void ffs_rawread_setup(void);
#endif /* DIRECTIO */
/*
 * If someone is blocked due to there being too many dirty buffers,
 * and numdirtybuffers is now reasonable, wake them up.
 */
numdirtywakeup(int level)
	if (numdirtybuffers <= level) {
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);

/*
 * Called when buffer space is potentially available for recovery.
 * getnewbuf() will block on this flag when it is unable to free
 * sufficient buffer space.  Buffer space becomes recoverable when
 * bp's get placed back in the queues.
 */

	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
/*
 * runningbufwakeup() - in-progress I/O accounting.
 */
runningbufwakeup(struct buf *bp)
	if (bp->b_runningbufspace) {
		atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
		bp->b_runningbufspace = 0;
		mtx_lock(&rbreqlock);
		if (runningbufreq && runningbufspace <= lorunningspace) {
			wakeup(&runningbufreq);
		mtx_unlock(&rbreqlock);

/*
 * Called when a buffer has been added to one of the free queues to
 * account for the buffer and to wakeup anyone waiting for free buffers.
 * This typically occurs when large amounts of metadata are being handled
 * by the buffer cache ( else buffer space runs out first, usually ).
 */
	atomic_add_int(&numfreebuffers, 1);
	needsbuffer &= ~VFS_BIO_NEED_ANY;
	if (numfreebuffers >= hifreebuffers)
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	wakeup(&needsbuffer);

/*
 * waitrunningbufspace()
 *
 * runningbufspace is a measure of the amount of I/O currently
 * running.  This routine is used in async-write situations to
 * prevent creating huge backups of pending writes to a device.
 * Only asynchronous writes are governed by this function.
 *
 * Reads will adjust runningbufspace, but will not block based on it.
 * The read load has a side effect of reducing the allowed write load.
 *
 * This does NOT turn an async write into a sync write.  It waits
 * for earlier writes to complete and generally returns before the
 * caller's write has reached the device.
 */
waitrunningbufspace(void)
	mtx_lock(&rbreqlock);
	while (runningbufspace > hirunningspace) {
		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
	mtx_unlock(&rbreqlock);
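
/*
 * Illustrative sketch of how the two halves above fit together (roughly
 * what bufwrite() does later in this file): the writer charges the buffer
 * size against runningbufspace before starting the I/O, the completion
 * path credits it back via runningbufwakeup(), and asynchronous writers
 * then throttle themselves in waitrunningbufspace() ("wdrain"):
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	atomic_add_int(&runningbufspace, bp->b_runningbufspace);
 *	...start the write...
 *	waitrunningbufspace();
 */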
/*
 * vfs_buf_test_cache:
 *
 * Called when a buffer is extended.  This function clears the B_CACHE
 * bit if the newly extended portion of the buffer does not contain
 * valid data.
 */
vfs_buf_test_cache(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;

		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;

/* Wake up the buffer daemon if necessary */
bd_wakeup(int dirtybuflevel)
	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
/*
 * bd_speedup - speedup the buffer cache flushing code
 */

/*
 * Calculating buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)

	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K).
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
		int factor = 4 * BKVASIZE / 1024;

		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
		if (physmem_est > 65536)
			nbuf += (physmem_est - 65536) * 2 / (factor * 5);

	if (maxbcache && nbuf > maxbcache / BKVASIZE)
		nbuf = maxbcache / BKVASIZE;

	/* XXX Avoid integer overflows later on with maxbufspace. */
	maxbuf = (INT_MAX / 3) / BKVASIZE;

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);
	if (nswbuf < NSWBUF_MIN)

	/*
	 * Reserve space for the buffer cache buffers
	 */
	v = (caddr_t)(swbuf + nswbuf);
	v = (caddr_t)(buf + nbuf);
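
/*
 * Worked example of the sizing above (illustrative numbers only, assuming
 * BKVASIZE is 16KB so factor == 64, and ignoring the base value of nbuf
 * and the min() cap, which are elided here): with physmem_est == 262144
 * (256MB), the first term contributes (65536 - 4096) / 64 == 960 buffers
 * (about 15MB, roughly 1/4 of the first 64MB) and the second term adds
 * (262144 - 65536) * 2 / 320 == 1228 buffers (about 19MB, roughly 1/10 of
 * the remaining 192MB).
 */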
/* Initialize the buffer subsystem.  Called before use of any buffers. */
	mtx_init(&bqlock, "buf queue lock", NULL, MTX_DEF);
	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
	mtx_init(&nblock, "needsbuffer lock", NULL, MTX_DEF);
	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		LIST_INIT(&bp->b_dep);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally.
	 */
	maxbufspace = nbuf * BKVASIZE;
	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;

	lorunningspace = 512 * 1024;
	hirunningspace = 1024 * 1024;

	/*
	 * Limit the amount of malloc memory since it is wired permanently
	 * into the kernel space.  Even though this is accounted for in the
	 * buffer allocation, we don't want the malloced region to grow
	 * uncontrolled.  The malloc scheme improves memory utilization
	 * significantly on average (small) directories.
	 */
	maxbufmallocspace = hibufspace / 20;

	/*
	 * Reduce the chance of a deadlock occurring by limiting the number
	 * of delayed-write dirty buffers we allow to stack up.
	 */
	hidirtybuffers = nbuf / 4 + 20;
	dirtybufthresh = hidirtybuffers * 9 / 10;

	/*
	 * To support extreme low-memory systems, make sure hidirtybuffers
	 * cannot eat up all available buffer space.  This occurs when our
	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
	 * buffer space assuming BKVASIZE'd (8K) buffers.
	 */
	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	lodirtybuffers = hidirtybuffers / 2;

	/*
	 * Try to keep the number of free buffers in the specified range,
	 * and give special processes (e.g. buf_daemon) access to an
	 * emergency reserve.
	 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

	/*
	 * Maximum number of async ops initiated per buf_daemon loop.  This is
	 * somewhat of a hack at the moment, we really need to limit ourselves
	 * based on the number of bytes of I/O in-transit that were initiated.
	 */
	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
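
/*
 * Illustrative arithmetic for the watermarks above (example values only,
 * assuming nbuf == 2188, BKVASIZE == 16KB and MAXBSIZE == 64KB):
 * maxbufspace == 2188 * 16KB, roughly 34MB; maxbufspace - 10 * MAXBSIZE
 * is larger than 3/4 of maxbufspace, so hibufspace is roughly 33.5MB and
 * lobufspace == hibufspace - 64KB.  hidirtybuffers starts at
 * nbuf / 4 + 20 == 567 and is halved only while it would represent more
 * than 3/4 of hibufspace worth of BKVASIZE'd buffers.
 */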
/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
bfreekva(struct buf *bp)
	atomic_add_int(&buffreekvacnt, 1);
	atomic_subtract_int(&bufspace, bp->b_kvasize);
	vm_map_remove(buffer_map, (vm_offset_t) bp->b_kvabase,
	    (vm_offset_t) bp->b_kvabase + bp->b_kvasize);
/*
 * Mark the buffer for removal from the appropriate free list in brelse.
 */
bremfree(struct buf *bp)

	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT((bp->b_flags & B_REMFREE) == 0,
	    ("bremfree: buffer %p already marked for delayed removal.", bp));
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfree: buffer %p not on a queue.", bp));

	bp->b_flags |= B_REMFREE;
	/* Fixup numfreebuffers count.  */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);

/*
 * Force an immediate removal from a free list.  Used only in nfs when
 * it abuses the b_freelist pointer.
 */
bremfreef(struct buf *bp)

/*
 * Removes a buffer from the free list, must be called with the
 * bqlock held.
 */
bremfreel(struct buf *bp)
	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_qindex != QUEUE_NONE,
	    ("bremfreel: buffer %p not on a queue.", bp));
	mtx_assert(&bqlock, MA_OWNED);

	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
	bp->b_qindex = QUEUE_NONE;
	/*
	 * If this was a delayed bremfree() we only need to remove the buffer
	 * from the queue and return; the stats are already done.
	 */
	if (bp->b_flags & B_REMFREE) {
		bp->b_flags &= ~B_REMFREE;
	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, the buffer was free and we must decrement
	 * numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0)
		atomic_subtract_int(&numfreebuffers, 1);
/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).  This is really just a special case of breadn().
 */
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf **bpp)

	return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
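
/*
 * Illustrative caller-side idiom (hypothetical filesystem code, shown only
 * to document the interface): the caller owns the buffer on return, even on
 * error, and releases it with brelse() or bqrelse():
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_data...
 *	bqrelse(bp);
 */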
/*
 * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
 * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
 * the buffer is valid and we do not have to do anything.
 */
breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
    int cnt, struct ucred * cred)

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (!TD_IS_IDLETHREAD(curthread))
				curthread->td_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			rabp->b_iooffset = dbtob(rabp->b_blkno);

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf **bpp)
	int rv = 0, readwait = 0;

	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
	*bpp = bp = getblk(vp, blkno, size, 0, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (!TD_IS_IDLETHREAD(curthread))
			curthread->td_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);

	breada(vp, rablkno, rabsize, cnt, cred);
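
/*
 * Illustrative use of breadn() with one read-ahead block (hypothetical
 * caller; the identifiers below are examples only):
 *
 *	daddr_t rablk = lblkno + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lblkno, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 */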
/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is marked invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.
 */
bufwrite(struct buf *bp)

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {

	oldflags = bp->b_flags;

	if (bp->b_pin_count > 0)

	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
	    ("FFS background buffer should not get here %p", bp));

	vp_md = vp->v_vflag & VV_MD;

	/* Mark the buffer clean */
	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bufobj_wref(bp->b_bufobj);
	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_int(&runningbufspace, bp->b_runningbufspace);

	if (!TD_IS_IDLETHREAD(curthread))
		curthread->td_ru.ru_oublock++;
	if (oldflags & B_ASYNC)
	bp->b_iooffset = dbtob(bp->b_blkno);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);

	/*
	 * don't allow the async write to saturate the I/O
	 * system.  We will not deadlock here because
	 * we are blocking waiting for I/O that is already in-progress
	 * to complete.  We do not block here if it is the update
	 * or syncer daemon trying to clean up as that can lead
	 * to deadlock.
	 */
	if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
		waitrunningbufspace();
bufbdflush(struct bufobj *bo, struct buf *bp)

	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {

		/*
		 * Try to find a buffer to flush.
		 */
		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
			    LK_EXCLUSIVE | LK_NOWAIT, NULL))
				panic("bdwrite: found ourselves");
			/* Don't countdeps with the bo lock held. */
			if (buf_countdeps(nbp, 0)) {
			if (nbp->b_flags & B_CLUSTEROK) {
			dirtybufferflushes++;
/*
 * Delayed write.  (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer out.
 */
bdwrite(struct buf *bp)
	struct thread *td = curthread;

	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));

	if (bp->b_flags & B_INVAL) {

	/*
	 * If we have too many dirty buffers, don't create any more.
	 * If we are wildly over our limit, then force a complete
	 * cleanup.  Otherwise, just keep the situation from getting
	 * out of control.  Note that we have to avoid a recursive
	 * disaster and not try to clean up after our own cleanup!
	 */
	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
		td->td_pflags |= TDP_INBDFLUSH;
		td->td_pflags &= ~TDP_INBDFLUSH;

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure that the filesystem needs is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */

	/*
	 * Wakeup the buffer flushing daemon if we have a lot of dirty
	 * buffers (midpoint between our recovery point and our stall
	 * point).
	 */
	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
/*
 * Turn buffer into delayed write request.  We must clear BIO_READ and
 * B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 * itself to properly update it in the dirty/clean lists.  We mark it
 * B_DONE to ensure that any asynchronization of the buffer properly
 * clears B_DONE ( else a panic will occur later ).
 *
 * bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 * might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 * should only be called if the buffer is known-good.
 *
 * Since the buffer is not on a queue, we do not update the numfreebuffers
 * count.
 *
 * The buffer must be on QUEUE_NONE.
 */
bdirty(struct buf *bp)

	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
		atomic_add_int(&numdirtybuffers, 1);
		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

/*
 * Clear B_DELWRI for buffer.
 *
 * Since the buffer is not on a queue, we do not update the numfreebuffers
 * count.
 *
 * The buffer must be on QUEUE_NONE.
 */
bundirty(struct buf *bp)

	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
	BUF_ASSERT_HELD(bp);

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		atomic_subtract_int(&numdirtybuffers, 1);
		numdirtywakeup(lodirtybuffers);
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
/*
 * Asynchronous write.  Start output on a buffer, but do not wait for
 * it to complete.  The buffer is released when the output completes.
 *
 * bwrite() ( or the VOP routine anyway ) is responsible for handling
 * B_INVAL buffers.  Not us.
 */
bawrite(struct buf *bp)

	bp->b_flags |= B_ASYNC;

/*
 * Called prior to the locking of any vnodes when we are expecting to
 * write.  We do not want to starve the buffer cache with too many
 * dirty buffers so we block here.  By blocking prior to the locking
 * of any vnodes we attempt to avoid the situation where a locked vnode
 * prevents the various system daemons from flushing related buffers.
 */
	if (numdirtybuffers >= hidirtybuffers) {
		while (numdirtybuffers >= hidirtybuffers) {
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			msleep(&needsbuffer, &nblock,
			    (PRIBIO + 4), "flswai", 0);
		mtx_unlock(&nblock);

/*
 * Return true if we have too many dirty buffers.
 */
buf_dirty_count_severe(void)

	return(numdirtybuffers >= hidirtybuffers);
/*
 * Release a busy buffer and, if requested, free its resources.  The
 * buffer will be stashed in the appropriate bufqueue[] allowing it
 * to be accessed later as a cache entity or reused for other purposes.
 */
brelse(struct buf *bp)
	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
	    bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_flags & B_MANAGED) {

	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If the error is anything
		 * other than an I/O error (EIO), assume that retrying
		 * is futile.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (!LIST_EMPTY(&bp->b_dep))
		if (bp->b_flags & B_DELWRI) {
			atomic_subtract_int(&numdirtybuffers, 1);
			numdirtywakeup(lodirtybuffers);
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (vm_page_count_severe()) {
		/*
		 * The locking of the BO_LOCK is not necessary since
		 * BKGRDINPROG cannot be set while we hold the buf
		 * lock, it can only be cleared if it is already
		 * pending.
		 */
		if (!(bp->b_vflags & BV_BKGRDINPROG))
			bp->b_flags |= B_RELBUF;
		bp->b_flags |= B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO
	 * buffer constituted, not even NFS buffers now.  Two flags affect
	 * this.  If B_INVAL, the struct buf is invalidated but the VM
	 * object is kept around ( i.e. so it is trivial to reconstitute
	 * the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer.  If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_mount != NULL &&
	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
	    !vn_isdisk(bp->b_vp, NULL) &&
	    (bp->b_flags & B_DELWRI))

		obj = bp->b_bufobj->bo_object;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;
		VM_OBJECT_LOCK(obj);
		for (i = 0; i < bp->b_npages; i++) {

			/*
			 * If we hit a bogus page, fix up *all* the bogus
			 * pages.
			 */
			if (m == bogus_page) {
				poff = OFF_TO_IDX(bp->b_offset);
				for (j = i; j < bp->b_npages; j++) {
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
							panic("brelse: page missing\n");
						bp->b_pages[j] = mtmp;

				if ((bp->b_flags & B_INVAL) == 0) {
					    trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
			if ((bp->b_flags & B_NOCACHE) ||
			    (bp->b_ioflags & BIO_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_lock_queues();
				vm_page_set_invalid(m, poffset, presid);
				vm_page_unlock_queues();
					printf("avoided corruption bug in bogus_page/brelse code\n");
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		VM_OBJECT_UNLOCK(obj);
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);

	} else if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0) {
		if (bp->b_bufsize != 0)
		if (bp->b_vp != NULL)

	if (BUF_LOCKRECURSED(bp)) {
		/* do not release to free list */

	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
			bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	/* remaining buffers */
		if ((bp->b_flags & (B_DELWRI|B_NEEDSGIANT)) ==
		    (B_DELWRI|B_NEEDSGIANT))
			bp->b_qindex = QUEUE_DIRTY_GIANT;
		else if (bp->b_flags & B_DELWRI)
			bp->b_qindex = QUEUE_DIRTY;
			bp->b_qindex = QUEUE_CLEAN;
		if (bp->b_flags & B_AGE)
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	mtx_unlock(&bqlock);

	/*
	 * If B_INVAL and B_DELWRI is set, clear B_DELWRI.  We have already
	 * placed the buffer on the correct queue.  We must also disassociate
	 * the device and vnode for a B_INVAL buffer so gbincore() doesn't
	 * find it.
	 */
	if (bp->b_flags & B_INVAL) {
		if (bp->b_flags & B_DELWRI)

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */
	if (!(bp->b_flags & B_DELWRI))

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
 */
bqrelse(struct buf *bp)
	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (BUF_LOCKRECURSED(bp)) {
		/* do not release to free list */

	if (bp->b_flags & B_MANAGED) {
		if (bp->b_flags & B_REMFREE) {
			mtx_unlock(&bqlock);
		bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);

	/* Handle delayed bremfree() processing. */
	if (bp->b_flags & B_REMFREE)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	/* buffers with stale but valid contents */
	if (bp->b_flags & B_DELWRI) {
		if (bp->b_flags & B_NEEDSGIANT)
			bp->b_qindex = QUEUE_DIRTY_GIANT;
			bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
		/*
		 * The locking of the BO_LOCK for checking of the
		 * BV_BKGRDINPROG is not necessary since the
		 * BV_BKGRDINPROG cannot be set while we hold the buf
		 * lock, it can only be cleared if it is already
		 * pending.
		 */
		if (!vm_page_count_severe() || (bp->b_vflags & BV_BKGRDINPROG)) {
			bp->b_qindex = QUEUE_CLEAN;
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp,
			/*
			 * We are too low on memory, we have to try to free
			 * the buffer (most importantly: the wired pages
			 * making up its backing store) *now*.
			 */
			mtx_unlock(&bqlock);
	mtx_unlock(&bqlock);

	if ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))

	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
/* Give pages used by the bp back to the VM system (where possible) */
vfs_vmio_release(struct buf *bp)

	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
	vm_page_lock_queues();
	for (i = 0; i < bp->b_npages; i++) {
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->oflags & VPO_BUSY) || (m->busy != 0))

		if (m->wire_count == 0) {
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O.
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (vm_page_count_severe()) {
				vm_page_try_to_cache(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

	if (bp->b_bufsize) {

	bp->b_flags &= ~B_VMIO;
/*
 * Check to see if a block at a particular lbn is available for a clustered
 * write.
 */
vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)

	/* If the buf isn't in core skip it */
	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)

	/* If the buf is busy we don't want to wait for it */
	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)

	/* Only cluster with valid clusterable delayed write buffers */
	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
	    (B_DELWRI | B_CLUSTEROK))

	if (bpa->b_bufsize != size)

	/*
	 * Check to see if it is in the expected place on disk and that the
	 * block has been mapped.
	 */
	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
/*
 * Implement clustered async writes for clearing out B_DELWRI buffers.
 * This is much better than the old way of writing only one buffer at
 * a time.  Note that we may not be presented with the buffers in the
 * correct order, so we search for the cluster in both directions.
 */
vfs_bio_awrite(struct buf *bp)
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;

	/*
	 * right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++)
			if (vfs_bio_clcheck(vp, size, lblkno + i,
			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)

		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
			if (vfs_bio_clcheck(vp, size, lblkno - j,
			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)

		/*
		 * this is a possible cluster write
		 */
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);

	bp->b_flags |= B_ASYNC;
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
/*
 * Find and initialize a new buffer header, freeing up existing buffers
 * in the bufqueues as necessary.  The new buffer is returned locked.
 *
 * Important:  B_INVAL is not set.  If the caller wishes to throw the
 * buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 * We block if:
 *	We have insufficient buffer headers
 *	We have insufficient buffer space
 *	buffer_map is too fragmented ( space reservation fails )
 *	If we have to flush dirty buffers ( but we try to avoid this )
 *
 * To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 * Instead we ask the buf daemon to do it for us.  We attempt to
 * avoid piecemeal wakeups of the pageout daemon.
 */
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
	static int flushingbufs;

	/*
	 * We can't afford to block since we might be holding a vnode lock,
	 * which may prevent system daemons from running.  We deal with
	 * low-memory situations by proactively returning memory and running
	 * async I/O rather than sync I/O.
	 */
	atomic_add_int(&getnewbufcalls, 1);
	atomic_subtract_int(&getnewbufrestarts, 1);
	atomic_add_int(&getnewbufrestarts, 1);

	/*
	 * Setup for scan.  If we do not have enough free buffers,
	 * we set up a degenerate case that immediately fails.  Note
	 * that if we are a specially marked process, we are allowed to
	 * dip into our reserves.
	 *
	 * The scanning sequence is nominally: EMPTY->EMPTYKVA->CLEAN
	 *
	 * We start with EMPTYKVA.  If the list is empty we back up to EMPTY.
	 * However, there are a number of cases (defragging, reusing, ...)
	 * where we cannot back up.
	 */
	nqindex = QUEUE_EMPTYKVA;
	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);

		/*
		 * If no EMPTYKVA buffers and we are either
		 * defragging or reusing, locate a CLEAN buffer
		 * to free or reuse.  If bufspace usage is low
		 * skip this step so we can allocate a new buffer.
		 */
		if (defrag || bufspace >= lobufspace) {
			nqindex = QUEUE_CLEAN;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);

		/*
		 * If we could not find or were not allowed to reuse a
		 * CLEAN buffer, check to see if it is ok to use an EMPTY
		 * buffer.  We can only use an EMPTY buffer if allocating
		 * its KVA would not otherwise run us out of buffer space.
		 */
		if (nbp == NULL && defrag == 0 &&
		    bufspace + maxsize < hibufspace) {
			nqindex = QUEUE_EMPTY;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);

	/*
	 * Run scan, possibly freeing data and/or kva mappings on the fly
	 */
	while ((bp = nbp) != NULL) {
		int qindex = nqindex;

		/*
		 * Calculate next bp ( we can only use it if we do not block
		 * or do other fancy things ).
		 */
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
				nqindex = QUEUE_EMPTYKVA;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
			case QUEUE_EMPTYKVA:
				nqindex = QUEUE_CLEAN;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))

		/*
		 * If we are defragging then we need a buffer with
		 * b_kvasize != 0.  XXX this situation should no longer
		 * occur, if defrag is non-zero the buffer's b_kvasize
		 * should also be non-zero at this point.  XXX
		 */
		if (defrag && bp->b_kvasize == 0) {
			printf("Warning: defrag empty buffer %p\n", bp);

		/*
		 * Start freeing the bp.  This is somewhat involved.  nbp
		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		BO_LOCK(bp->b_bufobj);
		if (bp->b_vflags & BV_BKGRDINPROG) {
			BO_UNLOCK(bp->b_bufobj);
		BO_UNLOCK(bp->b_bufobj);
		    "getnewbuf(%p) vp %p flags %X kvasize %d bufsize %d "
		    "queue %d (recycling)", bp, bp->b_vp, bp->b_flags,
		    bp->b_kvasize, bp->b_bufsize, qindex);

		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));

		/*
		 * Note: we no longer distinguish between VMIO and non-VMIO
		 * buffers.
		 */
		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));

		mtx_unlock(&bqlock);

		if (qindex == QUEUE_CLEAN) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);

		/*
		 * NOTE:  nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */
		if (bp->b_rcred != NOCRED) {
			crfree(bp->b_rcred);
			bp->b_rcred = NOCRED;
		if (bp->b_wcred != NOCRED) {
			crfree(bp->b_wcred);
			bp->b_wcred = NOCRED;
		if (!LIST_EMPTY(&bp->b_dep))
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("losing buffer 3");
		KASSERT(bp->b_vp == NULL,
		    ("bp: %p still has vnode %p.  qindex: %d",
		    bp, bp->b_vp, qindex));
		KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
		    ("bp: %p still on a buffer list. xflags %X",

		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_offset = NOOFFSET;
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_bufobj = NULL;
		bp->b_pin_count = 0;
		bp->b_fsprivate1 = NULL;
		bp->b_fsprivate2 = NULL;
		bp->b_fsprivate3 = NULL;

		LIST_INIT(&bp->b_dep);

		/*
		 * If we are defragging then free the buffer.
		 */
			bp->b_flags |= B_INVAL;

		/*
		 * Notify any waiters for the buffer lock about
		 * identity change by freeing the buffer.
		 */
		if (qindex == QUEUE_CLEAN && BUF_LOCKWAITERS(bp)) {
			bp->b_flags |= B_INVAL;

		/*
		 * If we are overcommitted then recover the buffer and its
		 * KVM space.  This occurs in rare situations when multiple
		 * processes are blocked in getnewbuf() or allocbuf().
		 */
		if (bufspace >= hibufspace)
		if (flushingbufs && bp->b_kvasize != 0) {
			bp->b_flags |= B_INVAL;
		if (bufspace < lobufspace)

	/*
	 * If we exhausted our list, sleep as appropriate.  We may have to
	 * wakeup various daemons and write out some dirty buffers.
	 *
	 * Generally we are sleeping due to insufficient buffer space.
	 */
			flags = VFS_BIO_NEED_BUFSPACE;
		} else if (bufspace >= hibufspace) {
			flags = VFS_BIO_NEED_BUFSPACE;
			flags = VFS_BIO_NEED_ANY;

		needsbuffer |= flags;
		mtx_unlock(&nblock);
		mtx_unlock(&bqlock);

		bd_speedup();	/* heeeelp */

		while (needsbuffer & flags) {
			if (msleep(&needsbuffer, &nblock,
			    (PRIBIO + 4) | slpflag, waitmsg, slptimeo)) {
				mtx_unlock(&nblock);
		mtx_unlock(&nblock);

		/*
		 * We finally have a valid bp.  We aren't quite out of the
		 * woods, we still have to reserve kva space.  In order
		 * to keep fragmentation sane we only allocate kva in
		 * BKVASIZE chunks.
		 */
		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;

		if (maxsize != bp->b_kvasize) {
			vm_offset_t addr = 0;

			vm_map_lock(buffer_map);
			if (vm_map_findspace(buffer_map,
			    vm_map_min(buffer_map), maxsize, &addr)) {
				/*
				 * Uh oh.  Buffer map is too fragmented.  We
				 * must defragment the map.
				 */
				atomic_add_int(&bufdefragcnt, 1);
				vm_map_unlock(buffer_map);
				bp->b_flags |= B_INVAL;
				vm_map_insert(buffer_map, NULL, 0,
				    addr, addr + maxsize,
				    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

				bp->b_kvabase = (caddr_t) addr;
				bp->b_kvasize = maxsize;
				atomic_add_int(&bufspace, bp->b_kvasize);
				atomic_add_int(&bufreusecnt, 1);
			vm_map_unlock(buffer_map);
		bp->b_saveaddr = bp->b_kvabase;
		bp->b_data = bp->b_saveaddr;
/*
 * buffer flushing daemon.  Buffers are normally flushed by the
 * update daemon but if it cannot keep up this process starts to
 * take the load in an attempt to prevent getnewbuf() from blocking.
 */
static struct kproc_desc buf_kp = {
SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);

	/*
	 * This process needs to be suspended prior to shutdown sync.
	 */
	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,

	/*
	 * This process is allowed to take the buffer cache to the limit.
	 */
	curthread->td_pflags |= TDP_NORUNNINGBUF;
		mtx_unlock(&bdlock);
		kproc_suspend_check(bufdaemonproc);

		/*
		 * Do the flush.  Limit the amount of in-transit I/O we
		 * allow to build up, otherwise we would completely saturate
		 * the I/O system.  Wakeup any waiting processes before we
		 * normally would so they can run in parallel with our drain.
		 */
		while (numdirtybuffers > lodirtybuffers) {
			flushed = flushbufqueues(QUEUE_DIRTY, 0);
			/* The list empty check here is slightly racy */
			if (!TAILQ_EMPTY(&bufqueues[QUEUE_DIRTY_GIANT])) {
				flushed += flushbufqueues(QUEUE_DIRTY_GIANT, 0);
				/*
				 * Could not find any buffers without rollback
				 * dependencies, so just write the first one
				 * in the hopes of eventually making progress.
				 */
				flushbufqueues(QUEUE_DIRTY, 1);
				    &bufqueues[QUEUE_DIRTY_GIANT])) {
					flushbufqueues(QUEUE_DIRTY_GIANT, 1);

		/*
		 * Only clear bd_request if we have reached our low water
		 * mark.  The buf_daemon normally waits 1 second and
		 * then incrementally flushes any dirty buffers that have
		 * built up, within reason.
		 *
		 * If we were unable to hit our low water mark and couldn't
		 * find any flushable buffers, we sleep half a second.
		 * Otherwise we loop immediately.
		 */
		if (numdirtybuffers <= lodirtybuffers) {
			/*
			 * We reached our low water mark, reset the
			 * request and sleep until we are needed again.
			 * The sleep is just so the suspend code works.
			 */
			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
			/*
			 * We couldn't find any flushable dirty buffers but
			 * still have too many dirty buffers, we
			 * have to sleep and try again.  (rare)
			 */
			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
/*
 * Try to flush a buffer in the dirty queue.  We must be careful to
 * free up B_INVAL buffers instead of writing them, which NFS is
 * particularly sensitive to.
 */
static int flushwithdeps = 0;
SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
    0, "Number of buffers flushed with dependencies that require rollbacks");

flushbufqueues(int queue, int flushdeps)
	struct buf sentinel;

	target = numdirtybuffers - lodirtybuffers;
	if (flushdeps && target > 2)

	TAILQ_INSERT_TAIL(&bufqueues[queue], &sentinel, b_freelist);
	while (flushed != target) {
		bp = TAILQ_FIRST(&bufqueues[queue]);
		if (bp == &sentinel)
		TAILQ_REMOVE(&bufqueues[queue], bp, b_freelist);
		TAILQ_INSERT_TAIL(&bufqueues[queue], bp, b_freelist);

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		if (bp->b_pin_count > 0) {
		BO_LOCK(bp->b_bufobj);
		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
		    (bp->b_flags & B_DELWRI) == 0) {
			BO_UNLOCK(bp->b_bufobj);
		BO_UNLOCK(bp->b_bufobj);
		if (bp->b_flags & B_INVAL) {
			mtx_unlock(&bqlock);
			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);

		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
			if (flushdeps == 0) {

		/*
		 * We must hold the lock on a vnode before writing
		 * one of its buffers.  Otherwise we may confuse, or
		 * in the case of a snapshot vnode, deadlock the
		 * system.
		 *
		 * The lock order here is the reverse of the normal
		 * one of vnode followed by buf lock.  This is ok because
		 * the NOWAIT will prevent deadlock.
		 */
		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			mtx_unlock(&bqlock);
			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
			    bp, bp->b_vp, bp->b_flags);
			vn_finished_write(mp);
			flushwithdeps += hasdeps;
			waitrunningbufspace();
			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
		vn_finished_write(mp);
	TAILQ_REMOVE(&bufqueues[queue], &sentinel, b_freelist);
	mtx_unlock(&bqlock);
/*
 * Check to see if a block is currently memory resident.
 */
incore(struct bufobj *bo, daddr_t blkno)

	bp = gbincore(bo, blkno);

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */
inmem(struct vnode * vp, daddr_t blkno)
	vm_offset_t toff, tinc, size;

	ASSERT_VOP_LOCKED(vp, "inmem");

	if (incore(&vp->v_bufobj, blkno))
	if (vp->v_mount == NULL)

	if (size > vp->v_mount->mnt_stat.f_iosize)
		size = vp->v_mount->mnt_stat.f_iosize;
	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;

	VM_OBJECT_LOCK(obj);
	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
		if (vm_page_is_valid(m,
		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
	VM_OBJECT_UNLOCK(obj);

	VM_OBJECT_UNLOCK(obj);
/*
 * Sets the dirty range for a buffer based on the status of the dirty
 * bits in the pages comprising the buffer.
 *
 * The range is limited to the size of the buffer.
 *
 * This routine is primarily used by NFS, but is generalized for the
 * entire filesystem buffer cache.
 */
static void
vfs_setdirty(struct buf *bp)
{

    /*
     * Degenerate case - empty buffer
     */
    if (bp->b_bufsize == 0)

    /*
     * We qualify the scan for modified pages on whether the
     * object has been flushed yet.
     */
    if ((bp->b_flags & B_VMIO) == 0)

    VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
    vfs_setdirty_locked_object(bp);
    VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
}
static void
vfs_setdirty_locked_object(struct buf *bp)
{

    object = bp->b_bufobj->bo_object;
    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    if (object->flags & (OBJ_MIGHTBEDIRTY | OBJ_CLEANING)) {
        vm_offset_t boffset;
        vm_offset_t eoffset;

        vm_page_lock_queues();
        /*
         * test the pages to see if they have been modified directly
         * by users through the VM system.
         */
        for (i = 0; i < bp->b_npages; i++)
            vm_page_test_dirty(bp->b_pages[i]);

        /*
         * Calculate the encompassing dirty range, boffset and eoffset,
         * (eoffset - boffset) bytes.
         */
        for (i = 0; i < bp->b_npages; i++) {
            if (bp->b_pages[i]->dirty)

        boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

        for (i = bp->b_npages - 1; i >= 0; --i) {
            if (bp->b_pages[i]->dirty) {

        eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);

        vm_page_unlock_queues();
        /*
         * Fit it to the buffer.
         */
        if (eoffset > bp->b_bcount)
            eoffset = bp->b_bcount;

        /*
         * If we have a good dirty range, merge with the existing
         * dirty range.
         */
        if (boffset < eoffset) {
            if (bp->b_dirtyoff > boffset)
                bp->b_dirtyoff = boffset;
            if (bp->b_dirtyend < eoffset)
                bp->b_dirtyend = eoffset;
/*
 * Get a block given a specified block and offset into a file/device.
 * The buffer's B_DONE bit will be cleared on return, making it almost
 * ready for an I/O initiation.  B_INVAL may or may not be set on
 * return.  The caller should clear B_INVAL prior to initiating a
 * read.
 *
 * For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
 * an existing buffer.
 *
 * For a VMIO buffer, B_CACHE is modified according to the backing VM.
 * If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
 * and then cleared based on the backing VM.  If the previous buffer is
 * non-0-sized but invalid, B_CACHE will be cleared.
 *
 * If getblk() must create a new buffer, the new buffer is returned with
 * both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
 * case it is returned with B_INVAL clear and B_CACHE set based on the
 * backing VM.
 *
 * getblk() also forces a bwrite() for any B_DELWRI buffer whose
 * B_CACHE bit is clear.
 *
 * What this means, basically, is that the caller should use B_CACHE to
 * determine whether the buffer is fully valid or not and should clear
 * B_INVAL prior to issuing a read.  If the caller intends to validate
 * the buffer by loading its data area with something, the caller needs
 * to clear B_INVAL.  If the caller does this without issuing an I/O,
 * the caller should set B_CACHE (as an optimization), else the caller
 * should issue the I/O and biodone() will set B_CACHE if the I/O was
 * a write attempt or if it was a successful read.  If the caller
 * intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
 * prior to issuing the READ.  biodone() will *not* clear B_INVAL.
 */
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
    int flags)
{

    CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
    ASSERT_VOP_LOCKED(vp, "getblk");
    if (size > MAXBSIZE)
        panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);

    /*
     * Block if we are low on buffers.  Certain processes are allowed
     * to completely exhaust the buffer cache.
     *
     * If this check ever becomes a bottleneck it may be better to
     * move it into the else, when gbincore() fails.  At the moment
     * it isn't a problem.
     *
     * XXX remove if 0 sections (clean this up after it's proven)
     */
    if (numfreebuffers == 0) {
        if (TD_IS_IDLETHREAD(curthread))

        needsbuffer |= VFS_BIO_NEED_ANY;
        mtx_unlock(&nblock);

    bp = gbincore(bo, blkno);

        /*
         * Buffer is in-core.  If the buffer is not busy, it must
         * be on a queue.
         */
        lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;

        if (flags & GB_LOCK_NOWAIT)
            lockflags |= LK_NOWAIT;

        error = BUF_TIMELOCK(bp, lockflags,
            BO_MTX(bo), "getblk", slpflag, slptimeo);

        /*
         * If we slept and got the lock we have to restart in case
         * the buffer changed identities.
         */
        if (error == ENOLCK)

        /* We timed out or were interrupted. */

        /*
         * The buffer is locked.  B_CACHE is cleared if the buffer is
         * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
         * and for a VMIO buffer B_CACHE is adjusted according to the
         * backing VM.
         */
        if (bp->b_flags & B_INVAL)
            bp->b_flags &= ~B_CACHE;
        else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
            bp->b_flags |= B_CACHE;

        /*
         * check for size inconsistencies for non-VMIO case.
         */
        if (bp->b_bcount != size) {
            if ((bp->b_flags & B_VMIO) == 0 ||
                (size > bp->b_kvasize)) {
                if (bp->b_flags & B_DELWRI) {
                    /*
                     * If buffer is pinned and caller does
                     * not want to sleep waiting for it to be
                     * unpinned, bail out
                     */
                    if (bp->b_pin_count > 0) {
                        if (flags & GB_LOCK_NOWAIT) {

                    bp->b_flags |= B_NOCACHE;

                    if (LIST_EMPTY(&bp->b_dep)) {
                        bp->b_flags |= B_RELBUF;

                        bp->b_flags |= B_NOCACHE;

        /*
         * If the size is inconsistent in the VMIO case, we can resize
         * the buffer.  This might lead to B_CACHE getting set or
         * cleared.  If the size has not changed, B_CACHE remains
         * unchanged from its previous state.
         */
        if (bp->b_bcount != size)

        KASSERT(bp->b_offset != NOOFFSET,
            ("getblk: no buffer offset"));

        /*
         * A buffer with B_DELWRI set and B_CACHE clear must
         * be committed before we can return the buffer in
         * order to prevent the caller from issuing a read
         * (due to B_CACHE not being set) and overwriting
         * potentially dirty data.
         *
         * Most callers, including NFS and FFS, need this to
         * operate properly either because they assume they
         * can issue a read if B_CACHE is not set, or because
         * (for example) an uncached B_DELWRI might loop due
         * to softupdates re-dirtying the buffer.  In the latter
         * case, B_CACHE is set after the first write completes,
         * preventing further loops.
         *
         * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
         * above while extending the buffer, we cannot allow the
         * buffer to remain with B_CACHE set after the write
         * completes or it will represent a corrupt state.  To
         * deal with this we set B_NOCACHE to scrap the buffer
         * after the write.
         *
         * We might be able to do something fancy, like setting
         * B_CACHE in bwrite() except if B_DELWRI is already set,
         * so the below call doesn't set B_CACHE, but that gets real
         * confusing.  This is much easier.
         */
        if ((bp->b_flags & (B_CACHE | B_DELWRI)) == B_DELWRI) {
            bp->b_flags |= B_NOCACHE;

        bp->b_flags &= ~B_DONE;
    } else {
        int bsize, maxsize, vmio;

        /*
         * Buffer is not in-core, create new buffer.  The buffer
         * returned by getnewbuf() is locked.  Note that the returned
         * buffer is also considered valid (not marked B_INVAL).
         */

        /*
         * If the user does not want us to create the buffer, bail out.
         */
        if (flags & GB_NOCREAT)

        bsize = bo->bo_bsize;
        offset = blkno * bsize;
        vmio = vp->v_object != NULL;
        maxsize = vmio ? size + (offset & PAGE_MASK) : size;
        maxsize = imax(maxsize, bsize);

        bp = getnewbuf(slpflag, slptimeo, size, maxsize);

            if (slpflag || slptimeo)

        /*
         * This code is used to make sure that a buffer is not
         * created while the getnewbuf routine is blocked.
         * This can be a problem whether the vnode is locked or not.
         * If the buffer is created out from under us, we have to
         * throw away the one we just created.
         *
         * Note: this must occur before we associate the buffer
         * with the vp especially considering limitations in
         * the splay tree implementation when dealing with duplicate
         * lblknos.
         */
        if (gbincore(bo, blkno)) {
            bp->b_flags |= B_INVAL;

        /*
         * Insert the buffer into the hash, so that it can
         * be found by incore.
         */
        bp->b_blkno = bp->b_lblkno = blkno;
        bp->b_offset = offset;

        /*
         * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
         * buffer size starts out as 0, B_CACHE will be set by
         * allocbuf() for the VMIO case prior to it testing the
         * backing store for validity.
         */
            bp->b_flags |= B_VMIO;
#if defined(VFS_BIO_DEBUG)
            if (vn_canvmio(vp) != TRUE)
                printf("getblk: VMIO on vnode type %d\n",

            KASSERT(vp->v_object == bp->b_bufobj->bo_object,
                ("ARGH! different b_bufobj->bo_object %p %p %p\n",
                bp, vp->v_object, bp->b_bufobj->bo_object));

            bp->b_flags &= ~B_VMIO;
            KASSERT(bp->b_bufobj->bo_object == NULL,
                ("ARGH! has b_bufobj->bo_object %p %p\n",
                bp, bp->b_bufobj->bo_object));

        bp->b_flags &= ~B_DONE;
    }
    CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
    BUF_ASSERT_HELD(bp);
    KASSERT(bp->b_bufobj == bo,
        ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
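
/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * checks B_CACHE on the buffer returned by getblk() and only issues a read
 * when it is clear; this is roughly what bread(9) does internally.  The
 * helper name and the omitted credential handling are assumptions for the
 * example.
 */
static int
example_read_block(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
{
    struct buf *bp;
    int error;

    bp = getblk(vp, blkno, size, 0, 0, 0);
    if ((bp->b_flags & B_CACHE) == 0) {
        /* Not fully valid: clear B_INVAL/BIO_ERROR and start the read. */
        bp->b_iocmd = BIO_READ;
        bp->b_flags &= ~B_INVAL;
        bp->b_ioflags &= ~BIO_ERROR;
        vfs_busy_pages(bp, 0);
        bp->b_iooffset = dbtob(bp->b_blkno);
        bstrategy(bp);
        error = bufwait(bp);
        if (error != 0) {
            brelse(bp);
            *bpp = NULL;
            return (error);
        }
    }
    *bpp = bp;
    return (0);
}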
/*
 * Get an empty, disassociated buffer of given size.  The buffer is initially
 * set to B_INVAL.
 */
geteblk(int size)
{

    maxsize = (size + BKVAMASK) & ~BKVAMASK;
    while ((bp = getnewbuf(0, 0, size, maxsize)) == 0)

    bp->b_flags |= B_INVAL;    /* b_dep cleared by getnewbuf() */
    BUF_ASSERT_HELD(bp);
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).  This code is able to
 * resize a buffer up or down.
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 * There are B_CACHE and B_DELWRI interactions that must be dealt with by
 * the caller.  Calling this code willy nilly can result in the loss of data.
 *
 * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
 * B_CACHE for the non-VMIO case.
 */
allocbuf(struct buf *bp, int size)
{
    int newbsize, mbsize;

    BUF_ASSERT_HELD(bp);

    if (bp->b_kvasize < size)
        panic("allocbuf: buffer too small");

    if ((bp->b_flags & B_VMIO) == 0) {
        /*
         * Just get anonymous memory from the kernel.  Don't
         * mess with B_CACHE.
         */
        mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
        if (bp->b_flags & B_MALLOC)

            newbsize = round_page(size);

        if (newbsize < bp->b_bufsize) {
            /*
             * malloced buffers are not shrunk
             */
            if (bp->b_flags & B_MALLOC) {

                    bp->b_bcount = size;

                    free(bp->b_data, M_BIOBUF);
                    if (bp->b_bufsize) {
                        atomic_subtract_int(

                    bp->b_saveaddr = bp->b_kvabase;
                    bp->b_data = bp->b_saveaddr;

                    bp->b_flags &= ~B_MALLOC;

                (vm_offset_t) bp->b_data + newbsize,
                (vm_offset_t) bp->b_data + bp->b_bufsize);
        } else if (newbsize > bp->b_bufsize) {
            /*
             * We only use malloced memory on the first allocation,
             * and revert to page-allocated memory when the buffer
             * grows.
             *
             * There is a potential smp race here that could lead
             * to bufmallocspace slightly passing the max.  It
             * is probably extremely rare and not worth worrying
             * over.
             */
            if ((bufmallocspace < maxbufmallocspace) &&
                (bp->b_bufsize == 0) &&
                (mbsize <= PAGE_SIZE/2)) {

                bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
                bp->b_bufsize = mbsize;
                bp->b_bcount = size;
                bp->b_flags |= B_MALLOC;
                atomic_add_int(&bufmallocspace, mbsize);

            /*
             * If the buffer is growing on its other-than-first allocation,
             * then we revert to the page-allocation scheme.
             */
            if (bp->b_flags & B_MALLOC) {
                origbuf = bp->b_data;
                origbufsize = bp->b_bufsize;
                bp->b_data = bp->b_kvabase;
                if (bp->b_bufsize) {
                    atomic_subtract_int(&bufmallocspace,

                bp->b_flags &= ~B_MALLOC;
                newbsize = round_page(newbsize);

                (vm_offset_t) bp->b_data + bp->b_bufsize,
                (vm_offset_t) bp->b_data + newbsize);

                bcopy(origbuf, bp->b_data, origbufsize);
                free(origbuf, M_BIOBUF);
        newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
        desiredpages = (size == 0) ? 0 :
            num_pages((bp->b_offset & PAGE_MASK) + newbsize);

        if (bp->b_flags & B_MALLOC)
            panic("allocbuf: VMIO buffer can't be malloced");
        /*
         * Set B_CACHE initially if buffer is 0 length or will become
         * 0 length.
         */
        if (size == 0 || bp->b_bufsize == 0)
            bp->b_flags |= B_CACHE;

        if (newbsize < bp->b_bufsize) {
            /*
             * DEV_BSIZE aligned new buffer size is less than the
             * DEV_BSIZE aligned existing buffer size.  Figure out
             * if we have to remove any pages.
             */
            if (desiredpages < bp->b_npages) {

                VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
                vm_page_lock_queues();
                for (i = desiredpages; i < bp->b_npages; i++) {
                    /*
                     * the page is not freed here -- it
                     * is the responsibility of
                     * vnode_pager_setsize
                     */
                    KASSERT(m != bogus_page,
                        ("allocbuf: bogus page found"));
                    while (vm_page_sleep_if_busy(m, TRUE, "biodep"))
                        vm_page_lock_queues();

                    bp->b_pages[i] = NULL;
                    vm_page_unwire(m, 0);

                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
                pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
                    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
                bp->b_npages = desiredpages;

        } else if (size > bp->b_bcount) {
            /*
             * We are growing the buffer, possibly in a
             * byte-granular fashion.
             */

            /*
             * Step 1, bring in the VM pages from the object,
             * allocating them if necessary.  We must clear
             * B_CACHE if these pages are not valid for the
             * range covered by the buffer.
             */

            obj = bp->b_bufobj->bo_object;

            VM_OBJECT_LOCK(obj);
            while (bp->b_npages < desiredpages) {

                pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
                if ((m = vm_page_lookup(obj, pi)) == NULL) {
                    /*
                     * note: must allocate system pages
                     * since blocking here could interfere
                     * with paging I/O, no matter which
                     * process we are.
                     */
                    m = vm_page_alloc(obj, pi,
                        VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |

                        atomic_add_int(&vm_pageout_deficit,
                            desiredpages - bp->b_npages);
                        VM_OBJECT_UNLOCK(obj);

                        VM_OBJECT_LOCK(obj);

                    bp->b_flags &= ~B_CACHE;
                    bp->b_pages[bp->b_npages] = m;

                /*
                 * We found a page.  If we have to sleep on it,
                 * retry because it might have gotten freed out
                 * from under us.
                 *
                 * We can only test VPO_BUSY here.  Blocking on
                 * m->busy might lead to a deadlock:
                 *
                 *  vm_fault->getpages->cluster_read->allocbuf
                 */
                if (vm_page_sleep_if_busy(m, FALSE, "pgtblk"))

                /*
                 * We have a good page.
                 */
                vm_page_lock_queues();

                vm_page_unlock_queues();
                bp->b_pages[bp->b_npages] = m;

            /*
             * Step 2.  We've loaded the pages into the buffer,
             * we have to figure out if we can still have B_CACHE
             * set.  Note that B_CACHE is set according to the
             * byte-granular range ( bcount and size ), not the
             * aligned range ( newbsize ).
             *
             * The VM test is against m->valid, which is DEV_BSIZE
             * aligned.  Needless to say, the validity of the data
             * needs to also be DEV_BSIZE aligned.  Note that this
             * fails with NFS if the server or some other client
             * extends the file's EOF.  If our buffer is resized,
             * B_CACHE may remain set!  XXX
             */

            toff = bp->b_bcount;
            tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);

            while ((bp->b_flags & B_CACHE) && toff < size) {

                if (tinc > (size - toff))

                pi = ((bp->b_offset & PAGE_MASK) + toff) >>

            VM_OBJECT_UNLOCK(obj);

            /*
             * Step 3, fixup the KVM pmap.  Remember that
             * bp->b_data is relative to bp->b_offset, but
             * bp->b_offset may be offset into the first page.
             */

            bp->b_data = (caddr_t)
                trunc_page((vm_offset_t)bp->b_data);

                (vm_offset_t)bp->b_data,

            bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
                (vm_offset_t)(bp->b_offset & PAGE_MASK));
    }
    if (newbsize < bp->b_bufsize)

    bp->b_bufsize = newbsize;    /* actual buffer allocation */
    bp->b_bcount = size;         /* requested buffer size */
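
/*
 * Illustrative sketch (not part of the original file): allocbuf() is
 * normally driven through getblk(), but a consumer (the NFS client does
 * something similar) may also grow an existing buffer explicitly when a
 * file's last block is extended.  The helper name and sizes are made up.
 */
static void
example_extend_block(struct vnode *vp, daddr_t lbn, int osize, int nsize)
{
    struct buf *bp;

    bp = getblk(vp, lbn, osize, 0, 0, 0);
    /*
     * Grow the buffer; for a VMIO buffer allocbuf() adjusts B_CACHE
     * based on whether the backing pages are valid for the new range.
     */
    allocbuf(bp, nsize);
    /* Mark it dirty; bdwrite() releases the buffer. */
    bdwrite(bp);
}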
biodone(struct bio *bp)
{
    void (*done)(struct bio *);

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    bp->bio_flags |= BIO_DONE;
    done = bp->bio_done;
/*
 * Wait for a BIO to finish.
 *
 * XXX: resort to a timeout for now.  The optimal locking (if any) for this
 * case is not yet clear.
 */
biowait(struct bio *bp, const char *wchan)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    while ((bp->bio_flags & BIO_DONE) == 0)
        msleep(bp, mtxp, PRIBIO, wchan, hz / 10);

    if (bp->bio_error != 0)
        return (bp->bio_error);
    if (!(bp->bio_flags & BIO_ERROR))
biofinish(struct bio *bp, struct devstat *stat, int error)
{

        bp->bio_error = error;
        bp->bio_flags |= BIO_ERROR;

    devstat_end_transaction_bio(stat, bp);
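
/*
 * Illustrative sketch (not part of the original file): the struct bio
 * handshake pairs biodone()/biofinish() on the provider side with
 * biowait() on the consumer side.  The helper names and wait-channel
 * string are arbitrary.
 */
static int
example_bio_wait(struct bio *bip)
{

    /* Consumer side: sleep until the provider marks the bio BIO_DONE. */
    return (biowait(bip, "exbio"));
}

static void
example_bio_complete(struct bio *bip, struct devstat *stat, int error)
{

    /* Provider side: record devstat, set BIO_ERROR if needed, wake waiters. */
    biofinish(bip, stat, error);
}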
/*
 * Wait for buffer I/O completion, returning error status.  The buffer
 * is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
 * error and cleared.
 */
bufwait(struct buf *bp)
{
    if (bp->b_iocmd == BIO_READ)
        bwait(bp, PRIBIO, "biord");
    else
        bwait(bp, PRIBIO, "biowr");
    if (bp->b_flags & B_EINTR) {
        bp->b_flags &= ~B_EINTR;

    if (bp->b_ioflags & BIO_ERROR) {
        return (bp->b_error ? bp->b_error : EIO);
/*
 * Call back function from struct bio back up to struct buf.
 */
bufdonebio(struct bio *bip)
{

    bp = bip->bio_caller2;
    bp->b_resid = bp->b_bcount - bip->bio_completed;
    bp->b_resid = bip->bio_resid;    /* XXX: remove */
    bp->b_ioflags = bip->bio_flags;
    bp->b_error = bip->bio_error;

        bp->b_ioflags |= BIO_ERROR;
dev_strategy(struct cdev *dev, struct buf *bp)
{

    if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1)))
        panic("b_iocmd botch");

        /* Try again later */
        tsleep(&bp, PRIBIO, "dev_strat", hz/10);

    bip->bio_cmd = bp->b_iocmd;
    bip->bio_offset = bp->b_iooffset;
    bip->bio_length = bp->b_bcount;
    bip->bio_bcount = bp->b_bcount;    /* XXX: remove */
    bip->bio_data = bp->b_data;
    bip->bio_done = bufdonebio;
    bip->bio_caller2 = bp;

    KASSERT(dev->si_refcount > 0,
        ("dev_strategy on un-referenced struct cdev *(%s)",

    csw = dev_refthread(dev);

        bp->b_error = ENXIO;
        bp->b_ioflags = BIO_ERROR;

    (*csw->d_strategy)(bip);
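
/*
 * Illustrative sketch (not part of the original file): dev_strategy()
 * wraps a struct buf in a struct bio and hands it to the character
 * device's d_strategy routine; bufdonebio() then feeds the result back
 * into bufdone().  A synchronous raw read could look roughly like this;
 * the buffer setup and helper name are assumptions for the example.
 */
static int
example_raw_read(struct cdev *dev, off_t offset, int size)
{
    struct buf *bp;
    int error;

    bp = geteblk(size);              /* anonymous buffer with mapped KVA */
    bp->b_iocmd = BIO_READ;
    bp->b_iooffset = offset;
    bp->b_bcount = size;
    dev_strategy(dev, bp);           /* completion arrives via bufdonebio() */
    error = bufwait(bp);
    /* ...consume bp->b_data on success... */
    brelse(bp);
    return (error);
}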
/*
 * Finish I/O on a buffer, optionally calling a completion function.
 * This is usually called from an interrupt so process blocking is
 * not allowed.
 *
 * biodone is also responsible for setting B_CACHE in a B_VMIO bp.
 * In a non-VMIO bp, B_CACHE will be set on the next getblk()
 * assuming B_INVAL is clear.
 *
 * For the VMIO case, we set B_CACHE if the op was a read and no
 * read error occurred, or if the op was a write.  B_CACHE is never
 * set if the buffer is invalid or otherwise uncacheable.
 *
 * biodone does not mess with B_INVAL, allowing the I/O routine or the
 * initiator to leave B_INVAL set to brelse the buffer out of existence
 * in the biodone routine.
 */
bufdone(struct buf *bp)
{
    struct bufobj *dropobj;
    void (*biodone)(struct buf *);

    CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);

    KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
    BUF_ASSERT_HELD(bp);

    runningbufwakeup(bp);
    if (bp->b_iocmd == BIO_WRITE)
        dropobj = bp->b_bufobj;
    /* call optional completion function if requested */
    if (bp->b_iodone != NULL) {
        biodone = bp->b_iodone;
        bp->b_iodone = NULL;

            bufobj_wdrop(dropobj);

        bufobj_wdrop(dropobj);
bufdone_finish(struct buf *bp)
{
    BUF_ASSERT_HELD(bp);

    if (!LIST_EMPTY(&bp->b_dep))

    if (bp->b_flags & B_VMIO) {

        struct vnode *vp = bp->b_vp;
        boolean_t are_queues_locked;

        obj = bp->b_bufobj->bo_object;

#if defined(VFS_BIO_DEBUG)
        mp_fixme("usecount and vflag accessed without locks.");
        if (vp->v_usecount == 0) {
            panic("biodone: zero vnode ref count");

        KASSERT(vp->v_object != NULL,
            ("biodone: vnode %p has no vm_object", vp));

        foff = bp->b_offset;
        KASSERT(bp->b_offset != NOOFFSET,
            ("biodone: no buffer offset"));

        VM_OBJECT_LOCK(obj);
#if defined(VFS_BIO_DEBUG)
        if (obj->paging_in_progress < bp->b_npages) {
            printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
                obj->paging_in_progress, bp->b_npages);

        /*
         * Set B_CACHE if the op was a normal read and no error
         * occurred.  B_CACHE is set for writes in the b*write()
         * routines.
         */
        iosize = bp->b_bcount - bp->b_resid;
        if (bp->b_iocmd == BIO_READ &&
            !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
            !(bp->b_ioflags & BIO_ERROR)) {
            bp->b_flags |= B_CACHE;

        if (bp->b_iocmd == BIO_READ) {
            vm_page_lock_queues();
            are_queues_locked = TRUE;
        } else
            are_queues_locked = FALSE;
        for (i = 0; i < bp->b_npages; i++) {

            resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;

            /*
             * cleanup bogus pages, restoring the originals
             */
            if (m == bogus_page) {

                m = vm_page_lookup(obj, OFF_TO_IDX(foff));

                    panic("biodone: page disappeared!");

                pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
                    bp->b_pages, bp->b_npages);

#if defined(VFS_BIO_DEBUG)
            if (OFF_TO_IDX(foff) != m->pindex) {
                    "biodone: foff(%jd)/m->pindex(%ju) mismatch\n",
                    (intmax_t)foff, (uintmax_t)m->pindex);

            /*
             * In the write case, the valid and clean bits are
             * already changed correctly (see bdwrite()), so we
             * only need to do this here in the read case.
             */
            if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
                vfs_page_set_valid(bp, foff, m);

            /*
             * when debugging new filesystems or buffer I/O methods, this
             * is the most common error that pops up.  if you see this, you
             * have not set the page busy flag correctly!!!
             */
                printf("biodone: page busy < 0, "
                    "pindex: %d, foff: 0x(%x,%x), "
                    "resid: %d, index: %d\n",
                    (int) m->pindex, (int)(foff >> 32),
                    (int) foff & 0xffffffff, resid, i);
                if (!vn_isdisk(vp, NULL))
                    printf(" iosize: %jd, lblkno: %jd, flags: 0x%x, npages: %d\n",
                        (intmax_t)bp->b_vp->v_mount->mnt_stat.f_iosize,
                        (intmax_t) bp->b_lblkno,
                        bp->b_flags, bp->b_npages);
                else
                    printf(" VDEV, lblkno: %jd, flags: 0x%x, npages: %d\n",
                        (intmax_t) bp->b_lblkno,
                        bp->b_flags, bp->b_npages);
                printf(" valid: 0x%lx, dirty: 0x%lx, wired: %d\n",
                    (u_long)m->valid, (u_long)m->dirty,

                panic("biodone: page busy < 0\n");

            vm_page_io_finish(m);
            vm_object_pip_subtract(obj, 1);
            foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;

        if (are_queues_locked)
            vm_page_unlock_queues();
        vm_object_pip_wakeupn(obj, 0);
        VM_OBJECT_UNLOCK(obj);

    /*
     * For asynchronous completions, release the buffer now.  The brelse
     * will do a wakeup there if necessary - so no need to do a wakeup
     * here in the async case.  The sync case always needs to do a wakeup.
     */
    if (bp->b_flags & B_ASYNC) {
        if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
            (bp->b_ioflags & BIO_ERROR))
/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
vfs_unbusy_pages(struct buf *bp)
{

    runningbufwakeup(bp);
    if (!(bp->b_flags & B_VMIO))

    obj = bp->b_bufobj->bo_object;
    VM_OBJECT_LOCK(obj);
    for (i = 0; i < bp->b_npages; i++) {

        if (m == bogus_page) {
            m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);

                panic("vfs_unbusy_pages: page missing\n");

            pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
                bp->b_pages, bp->b_npages);

        vm_object_pip_subtract(obj, 1);
        vm_page_io_finish(m);

    vm_object_pip_wakeupn(obj, 0);
    VM_OBJECT_UNLOCK(obj);
/*
 * vfs_page_set_valid:
 *
 *	Set the valid bits in a page based on the supplied offset.  The
 *	range is restricted to the buffer's size.
 *
 *	This routine is typically called after a read completes.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
{
    vm_ooffset_t soff, eoff;

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    /*
     * Start and end offsets in buffer.  eoff - soff may not cross a
     * page boundary or cross the end of the buffer.  The end of the
     * buffer, in this case, is our file EOF, not the allocation size
     * of the buffer.
     */
    eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
    if (eoff > bp->b_offset + bp->b_bcount)
        eoff = bp->b_offset + bp->b_bcount;

    /*
     * Set valid range.  This is typically the entire buffer and thus the
     * entire page.
     */
    vm_page_set_validclean(
        m,
        (vm_offset_t) (soff & PAGE_MASK),
        (vm_offset_t) (eoff - soff)
    );
/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being VPO_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 *
 * Since I/O has not been initiated yet, certain buffer flags
 * such as BIO_ERROR or B_INVAL may be in an inconsistent state
 * and should be ignored.
 */
3389 if (!(bp
->b_flags
& B_VMIO
))
3392 obj
= bp
->b_bufobj
->bo_object
;
3393 foff
= bp
->b_offset
;
3394 KASSERT(bp
->b_offset
!= NOOFFSET
,
3395 ("vfs_busy_pages: no buffer offset"));
3396 VM_OBJECT_LOCK(obj
);
3397 if (bp
->b_bufsize
!= 0)
3398 vfs_setdirty_locked_object(bp
);
3400 for (i
= 0; i
< bp
->b_npages
; i
++) {
3403 if (vm_page_sleep_if_busy(m
, FALSE
, "vbpage"))
3407 vm_page_lock_queues();
3408 for (i
= 0; i
< bp
->b_npages
; i
++) {
3411 if ((bp
->b_flags
& B_CLUSTER
) == 0) {
3412 vm_object_pip_add(obj
, 1);
3413 vm_page_io_start(m
);
3416 * When readying a buffer for a read ( i.e
3417 * clear_modify == 0 ), it is important to do
3418 * bogus_page replacement for valid pages in
3419 * partially instantiated buffers. Partially
3420 * instantiated buffers can, in turn, occur when
3421 * reconstituting a buffer from its VM backing store
3422 * base. We only have to do this if B_CACHE is
3423 * clear ( which causes the I/O to occur in the
3424 * first place ). The replacement prevents the read
3425 * I/O from overwriting potentially dirty VM-backed
3426 * pages. XXX bogus page replacement is, uh, bogus.
3427 * It may not work properly with small-block devices.
3428 * We need to find a better way.
3432 vfs_page_set_valid(bp
, foff
, m
);
3433 else if (m
->valid
== VM_PAGE_BITS_ALL
&&
3434 (bp
->b_flags
& B_CACHE
) == 0) {
3435 bp
->b_pages
[i
] = bogus_page
;
3438 foff
= (foff
+ PAGE_SIZE
) & ~(off_t
)PAGE_MASK
;
3440 vm_page_unlock_queues();
3441 VM_OBJECT_UNLOCK(obj
);
3443 pmap_qenter(trunc_page((vm_offset_t
)bp
->b_data
),
3444 bp
->b_pages
, bp
->b_npages
);
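
/*
 * Illustrative sketch (not part of the original file): the write side of
 * the busy-pages bracket, roughly what bufwrite() does: the pages are
 * busied with clear_modify != 0 so their valid/clean bits are updated,
 * and the bufobj output counter is bumped so the write can be waited on.
 * The helper name is made up for the example.
 */
static void
example_issue_vmio_write(struct buf *bp)
{

    bp->b_flags &= ~B_DONE;
    bp->b_ioflags &= ~BIO_ERROR;
    bp->b_iocmd = BIO_WRITE;
    bufobj_wref(bp->b_bufobj);      /* dropped again by bufdone() */
    vfs_busy_pages(bp, 1);          /* clear_modify != 0: pages go out clean */
    bp->b_iooffset = dbtob(bp->b_blkno);
    bstrategy(bp);
}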
/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 *
 * Note that while we only really need to clean through to b_bcount, we
 * just go ahead and clean through to b_bufsize.
 */
static void
vfs_clean_pages(struct buf *bp)
{
    vm_ooffset_t foff, noff, eoff;

    if (!(bp->b_flags & B_VMIO))

    foff = bp->b_offset;
    KASSERT(bp->b_offset != NOOFFSET,
        ("vfs_clean_pages: no buffer offset"));
    VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
    vm_page_lock_queues();
    for (i = 0; i < bp->b_npages; i++) {

        noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;

        if (eoff > bp->b_offset + bp->b_bufsize)
            eoff = bp->b_offset + bp->b_bufsize;
        vfs_page_set_valid(bp, foff, m);
        /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */

    vm_page_unlock_queues();
    VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
/*
 * vfs_bio_set_validclean:
 *
 *	Set the range within the buffer to valid and clean.  The range is
 *	relative to the beginning of the buffer, b_offset.  Note that b_offset
 *	itself may be offset from the beginning of the first page.
 */
vfs_bio_set_validclean(struct buf *bp, int base, int size)
{

    if (!(bp->b_flags & B_VMIO))

    /*
     * Fixup base to be relative to beginning of first page.
     * Set initial n to be the maximum number of bytes in the
     * first page that can be validated.
     */

    base += (bp->b_offset & PAGE_MASK);
    n = PAGE_SIZE - (base & PAGE_MASK);

    VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
    vm_page_lock_queues();
    for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {

        vm_page_set_validclean(m, base & PAGE_MASK, n);

    vm_page_unlock_queues();
    VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
/*
 * clear a buffer.  This routine essentially fakes an I/O, so we need
 * to clear BIO_ERROR and B_INVAL.
 *
 * Note that while we only theoretically need to clear through b_bcount,
 * we go ahead and clear through b_bufsize.
 */
vfs_bio_clrbuf(struct buf *bp)
{

    if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {

    bp->b_flags &= ~B_INVAL;
    bp->b_ioflags &= ~BIO_ERROR;
    VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
    if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
        (bp->b_offset & PAGE_MASK) == 0) {
        if (bp->b_pages[0] == bogus_page)

        mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
        VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
        if ((bp->b_pages[0]->valid & mask) == mask)

        if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
            ((bp->b_pages[0]->valid & mask) == 0)) {
            bzero(bp->b_data, bp->b_bufsize);

        bp->b_pages[0]->valid |= mask;

    ea = sa = bp->b_data;
    for (i = 0; i < bp->b_npages; i++, sa = ea) {
        ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
        ea = (caddr_t)(vm_offset_t)ulmin(
            (u_long)(vm_offset_t)ea,
            (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
        if (bp->b_pages[i] == bogus_page)

        j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
        mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
        VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
        if ((bp->b_pages[i]->valid & mask) == mask)

        if ((bp->b_pages[i]->valid & mask) == 0) {
            if ((bp->b_pages[i]->flags & PG_ZERO) == 0)

            for (; sa < ea; sa += DEV_BSIZE, j++) {
                if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
                    (bp->b_pages[i]->valid & (1 << j)) == 0)
                    bzero(sa, DEV_BSIZE);

        bp->b_pages[i]->valid |= mask;

    VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
static void
vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{

    to = round_page(to);
    from = round_page(from);
    index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

    VM_OBJECT_LOCK(kernel_object);
    for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

        /*
         * note: must allocate system pages since blocking here
         * could interfere with paging I/O, no matter which
         * process we are.
         */
        p = vm_page_alloc(kernel_object,
            ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
            VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);

            atomic_add_int(&vm_pageout_deficit,
                (to - pg) >> PAGE_SHIFT);
            VM_OBJECT_UNLOCK(kernel_object);

            VM_OBJECT_LOCK(kernel_object);

        p->valid = VM_PAGE_BITS_ALL;
        pmap_qenter(pg, &p, 1);
        bp->b_pages[index] = p;

    VM_OBJECT_UNLOCK(kernel_object);
    bp->b_npages = index;
/* Return pages associated with this buf to the vm system */
static void
vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{
    int index, newnpages;

    from = round_page(from);
    to = round_page(to);
    newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

    VM_OBJECT_LOCK(kernel_object);
    for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
        p = bp->b_pages[index];
        if (p && (index < bp->b_npages)) {

                "vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
                (intmax_t)bp->b_blkno,
                (intmax_t)bp->b_lblkno);

            bp->b_pages[index] = NULL;
            pmap_qremove(pg, 1);
            vm_page_lock_queues();
            vm_page_unwire(p, 0);

            vm_page_unlock_queues();

    VM_OBJECT_UNLOCK(kernel_object);
    bp->b_npages = newnpages;
/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 *
 * Note that even if the caller determines that the address space should
 * be valid, a race or a smaller-file mapped into a larger space may
 * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
 * check the return value.
 */
vmapbuf(struct buf *bp)
{

    struct pmap *pmap = &curproc->p_vmspace->vm_pmap;

    if (bp->b_bufsize < 0)

    prot = VM_PROT_READ;
    if (bp->b_iocmd == BIO_READ)
        prot |= VM_PROT_WRITE;    /* Less backwards than it looks */
    for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
        addr < bp->b_data + bp->b_bufsize;
        addr += PAGE_SIZE, pidx++) {
        /*
         * Do the vm_fault if needed; do the copy-on-write thing
         * when reading stuff off device into memory.
         *
         * NOTE! Must use pmap_extract() because addr may be in
         * the userland address space, and kextract is only guaranteed
         * to work for the kernel address space (see: sparc64 port).
         */

        if (vm_fault_quick(addr >= bp->b_data ? addr : bp->b_data,

            vm_page_lock_queues();
            for (i = 0; i < pidx; ++i) {
                vm_page_unhold(bp->b_pages[i]);
                bp->b_pages[i] = NULL;

            vm_page_unlock_queues();

        m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot);

        bp->b_pages[pidx] = m;

    if (pidx > btoc(MAXPHYS))
        panic("vmapbuf: mapped more than MAXPHYS");
    pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);

    kva = bp->b_saveaddr;
    bp->b_npages = pidx;
    bp->b_saveaddr = bp->b_data;
    bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(struct buf *bp)
{

    npages = bp->b_npages;
    pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
    vm_page_lock_queues();
    for (pidx = 0; pidx < npages; pidx++)
        vm_page_unhold(bp->b_pages[pidx]);
    vm_page_unlock_queues();

    bp->b_data = bp->b_saveaddr;
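
/*
 * Illustrative sketch (not part of the original file): vmapbuf() and
 * vunmapbuf() bracket "physio"-style I/O done directly against a user
 * buffer.  The setup below is an assumption for the example; real callers
 * (physio(9), for instance) also pin the buffer and bound the transfer
 * to MAXPHYS.
 */
static int
example_user_io(struct cdev *dev, struct uio *uio, struct buf *bp)
{
    int error;

    bp->b_iocmd = (uio->uio_rw == UIO_READ) ? BIO_READ : BIO_WRITE;
    bp->b_data = (caddr_t)uio->uio_iov[0].iov_base;   /* user address */
    bp->b_bufsize = bp->b_bcount = uio->uio_iov[0].iov_len;
    bp->b_saveaddr = bp->b_kvabase;          /* KVA that vmapbuf() fills */
    if (vmapbuf(bp) < 0)                     /* hold and map the user pages */
        return (EFAULT);
    bp->b_iooffset = uio->uio_offset;
    dev_strategy(dev, bp);
    error = bufwait(bp);
    vunmapbuf(bp);                           /* unmap and unhold the pages */
    return (error);
}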
bdone(struct buf *bp)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    bp->b_flags |= B_DONE;

bwait(struct buf *bp, u_char pri, const char *wchan)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    while ((bp->b_flags & B_DONE) == 0)
        msleep(bp, mtxp, pri, wchan, 0);
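
/*
 * Illustrative sketch (not part of the original file): bdone()/bwait()
 * implement the generic B_DONE handshake that bufdone() and bufwait()
 * are built on; a private completion path could use them directly.  The
 * helper names and the wait-channel string are arbitrary.
 */
static void
example_signal_done(struct buf *bp)
{

    bdone(bp);              /* sets B_DONE and wakes any bwait()er */
}

static void
example_wait_done(struct buf *bp)
{

    bwait(bp, PRIBIO, "exdone");
}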
bufsync(struct bufobj *bo, int waitfor, struct thread *td)
{

    return (VOP_FSYNC(bo->__bo_vnode, waitfor, td));
}
bufstrategy(struct bufobj *bo, struct buf *bp)
{

    KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
    KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
        ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
    i = VOP_STRATEGY(vp, bp);
    KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
}
bufobj_wrefl(struct bufobj *bo)
{

    KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
    ASSERT_BO_LOCKED(bo);

bufobj_wref(struct bufobj *bo)
{

    KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));

bufobj_wdrop(struct bufobj *bo)
{

    KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));

    KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
    if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
        bo->bo_flag &= ~BO_WWAIT;
        wakeup(&bo->bo_numoutput);
    }

bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
{

    KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
    ASSERT_BO_LOCKED(bo);

    while (bo->bo_numoutput) {
        bo->bo_flag |= BO_WWAIT;
        error = msleep(&bo->bo_numoutput, BO_MTX(bo),
            slpflag | (PRIBIO + 1), "bo_wwait", timeo);
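
/*
 * Illustrative sketch (not part of the original file): the bufobj output
 * counter pairs bufobj_wref() when a write is started with bufobj_wdrop()
 * when it completes (bufdone() does the drop for BIO_WRITE buffers), and
 * a flush path can drain all outstanding writes with bufobj_wwait().  The
 * helper name is made up for the example.
 */
static int
example_drain_writes(struct bufobj *bo)
{
    int error;

    BO_LOCK(bo);
    error = bufobj_wwait(bo, 0, 0);   /* sleep until bo_numoutput reaches 0 */
    BO_UNLOCK(bo);
    return (error);
}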
bpin(struct buf *bp)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

bunpin(struct buf *bp)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    if (--bp->b_pin_count == 0)

bunpin_wait(struct buf *bp)
{

    mtxp = mtx_pool_find(mtxpool_sleep, bp);

    while (bp->b_pin_count > 0)
        msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
3887 #include "opt_ddb.h"
3889 #include <ddb/ddb.h>
3891 /* DDB command to show buffer data */
3892 DB_SHOW_COMMAND(buffer
, db_show_buffer
)
3895 struct buf
*bp
= (struct buf
*)addr
;
3898 db_printf("usage: show buffer <addr>\n");
3902 db_printf("buf at %p\n", bp
);
3903 db_printf("b_flags = 0x%b\n", (u_int
)bp
->b_flags
, PRINT_BUF_FLAGS
);
3905 "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
3906 "b_bufobj = (%p), b_data = %p, b_blkno = %jd\n",
3907 bp
->b_error
, bp
->b_bufsize
, bp
->b_bcount
, bp
->b_resid
,
3908 bp
->b_bufobj
, bp
->b_data
, (intmax_t)bp
->b_blkno
);
3911 db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp
->b_npages
);
3912 for (i
= 0; i
< bp
->b_npages
; i
++) {
3915 db_printf("(%p, 0x%lx, 0x%lx)", (void *)m
->object
,
3916 (u_long
)m
->pindex
, (u_long
)VM_PAGE_TO_PHYS(m
));
3917 if ((i
+ 1) < bp
->b_npages
)
3922 lockmgr_printinfo(&bp
->b_lock
);
3925 DB_SHOW_COMMAND(lockedbufs
, lockedbufs
)
3930 for (i
= 0; i
< nbuf
; i
++) {
3932 if (BUF_ISLOCKED(bp
)) {
3933 db_show_buffer((uintptr_t)bp
, 1, 0, NULL
);