/*
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 * 	Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <vm/vm_page2.h>

#include <machine/limits.h>
/*
 * Cluster tracking cache - replaces the original vnode v_* fields which had
 * limited utility and were not MP safe.
 *
 * The cluster tracking cache is a simple 4-way set-associative non-chained
 * cache.  It is capable of tracking up to four zones separated by 1MB or
 * more per vnode.
 *
 * NOTE: We want this structure to be cache-line friendly so the iterator
 *	 is embedded rather than in a separate array.
 *
 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
 *	 For now we treat the values as heuristic but also self-consistent.
 *	 i.e. the values cannot be completely random and cannot be SMP unsafe
 *	 or the cluster code might end up clustering non-contiguous buffers
 *	 at the wrong offsets.
 */
struct cluster_cache {
	struct vnode *vp;
	u_int	locked;
	off_t	v_lastw;	/* last write (write cluster) */
	off_t	v_cstart;	/* start block of cluster */
	off_t	v_lasta;	/* last allocation */
	u_int	v_clen;		/* length of current cluster */
	u_int	iterator;
} __cachealign;

typedef struct cluster_cache cluster_cache_t;

#define CLUSTER_CACHE_SIZE	512
#define CLUSTER_CACHE_MASK	(CLUSTER_CACHE_SIZE - 1)

#define CLUSTER_ZONE		((off_t)(1024 * 1024))

cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
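
/*
 * Illustrative sketch (not part of the original source): how a vnode
 * pointer selects one 4-entry set in cluster_array.  The hash and mask
 * mirror the computation performed by cluster_getcache() below; this
 * helper exists only as an example and is not used by the cluster code.
 */
static __inline cluster_cache_t *
cluster_set_for_vp(struct vnode *vp)
{
	size_t hv;

	/* hash the vnode pointer and round down to a multiple of 4 */
	hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
	hv &= CLUSTER_CACHE_MASK & ~3;

	/* entries [hv] through [hv + 3] form the set for this vnode */
	return (&cluster_array[hv]);
}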
#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
				struct buf *last_bp, int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
				off_t doffset, int blksize, int run,
				struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
			    off_t start_loffset, int bytes);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
    &write_behind_minfilesize, 0, "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;
/*
 * Acquire/release cluster cache (can return dummy entry)
 */
static cluster_cache_t *
cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
{
	hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
	hv &= CLUSTER_CACHE_MASK & ~3;
	cc = &cluster_array[hv];

	for (i = 0; i < 4; ++i) {
		if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {

	if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
		if (cc[xact].vp == vp &&
		    ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
		atomic_swap_int(&cc[xact].locked, 0);

	/*
	 * New entry.  If we can't acquire the cache line then use the
	 * passed-in dummy element and reset all fields.
	 *
	 * When we are able to acquire the cache line we only clear the
	 * fields if the vp does not match.  This allows us to multi-zone
	 * a vp and for excessive zones / partial clusters to be retired.
	 */
	i = cc->iterator++ & 3;
	if (atomic_swap_int(&cc->locked, 1) != 0) {

static void
cluster_putcache(cluster_cache_t *cc)
{
	atomic_swap_int(&cc->locked, 0);
}
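
/*
 * Illustrative sketch (not part of the original source): the usage
 * pattern for the cluster cache, mirroring what cluster_write() does
 * further down.  cluster_getcache() either locks a real cache line or
 * falls back to the caller-supplied dummy; cluster_putcache() releases
 * whichever entry was returned.
 */
static __inline void
cluster_cache_usage_example(struct vnode *vp, off_t loffset)
{
	cluster_cache_t dummy;
	cluster_cache_t *cc;

	cc = cluster_getcache(&dummy, vp, loffset);
	/* ... read/update cc->v_cstart, cc->v_clen, cc->v_lastw ... */
	cluster_putcache(cc);
}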
/*
 * This replaces bread(), providing a synchronous read of the requested
 * buffer plus asynchronous read-ahead within the specified bounds.
 *
 * The caller may pre-populate *bpp if it already has the requested buffer
 * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typical ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
	      int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed and the userland is issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		bp->b_flags &= ~B_RAM;

		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {

		/*
		 * We got everything or everything is in the cache, no
		 */

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;

		if (loffset >= filesize)
		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
	} else {
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;

		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error)
			goto single_block_read;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		if (doffset == NOOFFSET)
			goto single_block_read;
		if (nblks <= 1)
			goto single_block_read;

		bp = cluster_rbuild(vp, filesize, loffset,
				    doffset, blksize, nblks, bp);
		loffset += bp->b_bufsize;
		maxra -= bp->b_bufsize / blksize;

		/*
		 * If it isn't in the cache, then get a chunk from
		 * disk if sequential, otherwise just get the block.
		 */
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
				(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (error == 0 && maxra > 0) {
		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if ((rbp->b_flags & B_CACHE)) {

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;

		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;

		rbp->b_cmd = BUF_CMD_READ;
		/*rbp->b_flags |= B_AGE*/;

		rbp = cluster_rbuild(vp, filesize, loffset,
				     doffset, blksize, nblks, rbp);

		rbp->b_bio2.bio_offset = doffset;

		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}
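
/*
 * Illustrative sketch (not part of the original source): how a
 * filesystem read path might call cluster_readx() in place of bread().
 * The 2MB maxreq value and the error handling are assumptions made for
 * the example only.
 */
static __inline int
cluster_readx_usage_example(struct vnode *vp, off_t filesize, off_t loffset,
			    int blksize, size_t resid)
{
	struct buf *bp = NULL;
	int error;

	/* resid acts as minreq; maxreq is the sequential heuristic */
	error = cluster_readx(vp, filesize, loffset, blksize,
			      resid, 2 * 1024 * 1024, &bp);
	if (error == 0) {
		/* ... copy data out of bp->b_data ... */
		bqrelse(bp);
	}
	return (error);
}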
/*
 * This replaces breadcb(), providing an asynchronous read of the requested
 * buffer with a callback, plus an asynchronous read-ahead within the
 * specified bounds.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typical ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
void
cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
	       int blksize, size_t minreq, size_t maxreq,
	       void (*func)(struct bio *), void *arg)
{
	struct buf *bp, *rbp, *reqbp;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed and the userland is issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);

	/*
	 * Get the requested block.
	 */
	reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Setup for func() call whether we do read-ahead or not.
		 */
		bp->b_bio1.bio_caller_info1.ptr = arg;
		bp->b_bio1.bio_flags |= BIO_DONE;

		/*
		 * Not sequential, do not do any read-ahead
		 */

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)

		bp->b_flags &= ~B_RAM;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {

		/*
		 * We got everything or everything is in the cache, no
		 */

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;

		/* leave reqbp intact to force function callback */

		if (loffset >= filesize)
		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
	} else {
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = func;
		bp->b_bio1.bio_caller_info1.ptr = arg;

		reqbp = NULL;	/* don't func() reqbp, it's running async */

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;

		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error)
			goto single_block_read;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		if (doffset == NOOFFSET)
			goto single_block_read;
		if (nblks <= 1)
			goto single_block_read;

		bp = cluster_rbuild(vp, filesize, loffset,
				    doffset, blksize, nblks, bp);
		loffset += bp->b_bufsize;
		maxra -= bp->b_bufsize / blksize;

		/*
		 * If it isn't in the cache, then get a chunk from
		 * disk if sequential, otherwise just get the block.
		 */
	}

	/*
	 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
	 * bp will either be an asynchronous cluster buf or an asynchronous
	 * single-buf.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
				(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if ((rbp->b_flags & B_CACHE)) {

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;

		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;

		rbp->b_cmd = BUF_CMD_READ;
		/*rbp->b_flags |= B_AGE*/;

		rbp = cluster_rbuild(vp, filesize, loffset,
				     doffset, blksize, nblks, rbp);

		rbp->b_bio2.bio_offset = doffset;

		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */

	/*
	 * If reqbp is non-NULL it had B_CACHE set and we issue the
	 * function callback synchronously.
	 *
	 * Note that we may start additional asynchronous I/O before doing
	 * the func() callback for the B_CACHE case
	 */
	if (reqbp)
		func(&reqbp->b_bio1);
}
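
/*
 * Illustrative sketch (not part of the original source): the shape of a
 * callback suitable for cluster_readcb(), following the contract stated
 * above -- issue bpdone(bp, 0) if BIO_DONE is not yet set, then clear
 * BIO_DONE and dispose of the buffer.
 */
static void
cluster_readcb_example_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if ((bio->bio_flags & BIO_DONE) == 0)
		bpdone(bp, 0);
	bio->bio_flags &= ~BIO_DONE;
	/* ... consume bp->b_data ... */
	bqrelse(bp);
}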
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	int maxiosize = vmaxiosize(vp);

	while (loffset + run * blksize > filesize) {

	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {

	bp = trypbuf_kva(&cluster_pbuf_freecnt);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_xio.xio_npages = 0;

	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if ((bp->b_xio.xio_npages * PAGE_SIZE) +
		    round_page(blksize) > maxiosize) {

		/*
		 * Shortcut some checks and try to avoid buffers that
		 * would block in the lock.  The same checks have to
		 * be made again after we officially get the buffer.
		 */
		tbp = getblk(vp, loffset + i * blksize, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);

		for (j = 0; j < tbp->b_xio.xio_npages; j++) {
			if (tbp->b_xio.xio_pages[j]->valid)

		if (j != tbp->b_xio.xio_npages) {

		/*
		 * Stop scanning if the buffer is fully valid
		 * (marked B_CACHE), or locked (may be doing a
		 * background write), or if the buffer is not
		 * VMIO backed.  The clustering code can only deal
		 * with VMIO-backed buffers.
		 */
		if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
		    (tbp->b_flags & B_VMIO) == 0 ||
		    (LIST_FIRST(&tbp->b_dep) != NULL &&

		/*
		 * The buffer must be completely invalid in order to
		 * take part in the cluster.  If it is partially valid
		 */
		for (j = 0; j < tbp->b_xio.xio_npages; j++) {
			if (tbp->b_xio.xio_pages[j]->valid)

		if (j != tbp->b_xio.xio_npages) {

		/*
		 * Set a read-ahead mark as appropriate.  Always
		 * set the read-ahead mark at (run - 1).  It is
		 * unclear why we were also setting it at i == 1.
		 */
		if (/*i == 1 ||*/ i == (run - 1))
			cluster_setram(tbp);

		/*
		 * Depress the priority of buffers not explicitly
		 */
		/* tbp->b_flags |= B_AGE; */

		/*
		 * Set the block number if it isn't set, otherwise
		 * if it is make sure it matches the block number we
		 */
		if (tbp->b_bio2.bio_offset == NOOFFSET) {
			tbp->b_bio2.bio_offset = boffset;
		} else if (tbp->b_bio2.bio_offset != boffset) {

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		cluster_append(&bp->b_bio1, tbp);

		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			m = tbp->b_xio.xio_pages[j];
			vm_page_busy_wait(m, FALSE, "clurpg");
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}

		/*
		 * XXX shouldn't this be += size for both, like in
		 */
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}

	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);

	return (bp);
}
/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);

	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IODEBUG;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			tbp->b_flags |= B_IODEBUG;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 *
 * In addition, write_behind is only activated for files that have
 * grown past a certain size (default 10MB).  Otherwise temporary files
 * wind up generating a lot of unnecessary disk I/O.
 */
static int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		if (vp->v_filesize >= write_behind_minfilesize) {
			r = cluster_wbuild(vp, NULL, blksize,
					   start_loffset, len);
		}
/*
 * Do clustered write for FFS.
 *
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 *
 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	int maxclen, cursize;
	cluster_cache_t dummy;
	cluster_cache_t *cc;

	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;

	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	cc = cluster_getcache(&dummy, vp, loffset);

	/*
	 * Initialize vnode to beginning of file.
	 */
	cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;

	if (cc->v_clen == 0 || loffset != cc->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != cc->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (cc->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 */
			cursize = cc->v_lastw - cc->v_cstart + blksize;
			if (bp->b_loffset + blksize < filesize ||
			    loffset != cc->v_lastw + blksize ||
			    cc->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						cc->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(cc, vp,
							      bp, blksize);
				endbp = &buflist->bs_children
					[buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 *
					 * NOTE: We do not brelse the last
					 *	 element which is bp, and we
					 *	 do not return here.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					cluster_wbuild_wb(vp,
						blksize, cc->v_cstart,
						cursize);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					cc->v_lastw = loffset;
					cc->v_lasta = bp->b_bio2.bio_offset;
					cluster_putcache(cc);
					return;
				}
			}
		}

		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize < filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			cc->v_lasta = bp->b_bio2.bio_offset;
			cc->v_cstart = loffset + blksize;
			cc->v_lastw = loffset;
			cluster_putcache(cc);
			return;
		}
		if (maxclen > blksize)
			cc->v_clen = maxclen - blksize;

		if (!async && cc->v_clen == 0) { /* I/O not contiguous */
			cc->v_cstart = loffset + blksize;
		} else {	/* Wait for rest of cluster */
			cc->v_cstart = loffset;
		}
	} else if (loffset == cc->v_cstart + cc->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		cluster_wbuild_wb(vp, blksize, cc->v_cstart,
				  cc->v_clen + blksize);
		cc->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe() &&
		   bp->b_loffset + blksize < filesize) {
		/*
		 * We are low on memory, get it going NOW.  However, do not
		 * try to push out a partial block at the end of the file
		 * as this could lead to extremely non-optimal write activity.
		 */
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
	}
	cc->v_lastw = loffset;
	cc->v_lasta = bp->b_bio2.bio_offset;
	cluster_putcache(cc);
}
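
/*
 * Illustrative sketch (not part of the original source): a filesystem
 * write path handing a delayed-write buffer to cluster_write().  The
 * seqcount value is an assumption made for the example only.
 */
static __inline void
cluster_write_usage_example(struct buf *bp, off_t filesize, int blksize)
{
	int seqcount = 4;	/* heuristic: how sequential recent writes are */

	/*
	 * cluster_write() either pushes out a completed cluster or delays
	 * the buffer so it can be merged with later sequential writes.
	 */
	cluster_write(bp, filesize, blksize, seqcount);
}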
/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
int
cluster_awrite(struct buf *bp)
{
	/*
	 * Don't bother if it isn't clusterable.
	 */
	if ((bp->b_flags & B_CLUSTEROK) == 0 ||
	    (bp->b_vp->v_flag & VOBJBUF) == 0) {
		total = bp->b_bufsize;

	total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
			       bp->b_loffset, vmaxiosize(bp->b_vp));
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
	       int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	/*
	 * If the buffer matches the passed locked & removed buffer
	 * we used the passed buffer (which might not be B_DELWRI).
	 *
	 * Otherwise locate the buffer and determine if it is
	 */
	if (bpp && (*bpp)->b_loffset == start_loffset) {

	tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
	    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
	    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
		start_loffset += blksize;

	KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

	/*
	 * Extra memory in the buffer, punt on this buffer.
	 * XXX we could handle this in most cases, but we would
	 * have to push the extra memory down to after our max
	 * possible cluster size and then potentially pull it back
	 * up if the cluster was terminated prematurely--too much
	 */
	if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
	    (tbp->b_bcount != tbp->b_bufsize) ||
	    (tbp->b_bcount != blksize) ||
	    (bytes == blksize) ||
	    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
		totalwritten += tbp->b_bufsize;
		start_loffset += blksize;

	/*
	 * Set up the pbuf.  Track our append point with b_bcount
	 * and b_bufsize.  b_bufsize is not used by the device but
	 * our caller uses it to loop clusters and we use it to
	 * detect a premature EOF on the block device.
	 */
	bp->b_xio.xio_npages = 0;
	bp->b_loffset = tbp->b_loffset;
	bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags &= ~B_ERROR;
	bp->b_flags |= B_CLUSTER | B_BNOCLIP |
		       (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

	/*
	 * From this location in the file, scan forward to see
	 * if there are buffers with adjacent data that need to
	 * be written as well.
	 *
	 * IO *must* be initiated on index 0 at this point
	 * (particularly when called from cluster_awrite()).
	 */
	for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
		tbp = findblk(vp, start_loffset,
			      FINDBLK_NBLOCK);

		/*
		 * Buffer not found or could not be locked
		 */

		/*
		 * If it IS in core, but has different
		 * characteristics, then don't cluster
		 */
		if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
		      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
		    != (B_DELWRI | B_CLUSTEROK |
		      (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
		    (tbp->b_flags & B_LOCKED)
		) {

		/*
		 * Check that the combined cluster
		 * would make sense with regard to pages
		 * and would not be too large
		 *
		 * WARNING! buf_checkwrite() must be the last
		 *	    check made.  If it returns 0 then
		 *	    we must initiate the I/O.
		 */
		if ((tbp->b_bcount != blksize) ||
		    ((bp->b_bio2.bio_offset + i) !=
		      tbp->b_bio2.bio_offset) ||
		    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
		      (maxiosize / PAGE_SIZE)) ||
		    (LIST_FIRST(&tbp->b_dep) &&
		     buf_checkwrite(tbp))
		) {

		if (LIST_FIRST(&tbp->b_dep))

		/*
		 * Ok, it's passed all the tests,
		 * so remove it from the free list
		 * and mark it busy. We will use it.
		 */
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * If the IO is via the VM then we do some
		 * special VM hackery (yuck).  Since the buffer's
		 * block size may not be page-aligned it is possible
		 * for a page to be shared between two buffers.  We
		 * have to get rid of the duplication when building
		 */
		if (tbp->b_flags & B_VMIO) {
			/*
			 * Try to avoid deadlocks with the VM system.
			 * However, we cannot abort the I/O if
			 * must_initiate is non-zero.
			 */
			if (must_initiate == 0) {
				for (j = 0;
				     j < tbp->b_xio.xio_npages;
				     ++j) {
					m = tbp->b_xio.xio_pages[j];
					if (m->flags & PG_BUSY) {

			for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
				m = tbp->b_xio.xio_pages[j];
				vm_page_busy_wait(m, FALSE, "clurpg");
				vm_page_io_start(m);
				vm_object_pip_add(m->object, 1);
				if ((bp->b_xio.xio_npages == 0) ||
				    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
					bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
					bp->b_xio.xio_npages++;
				}
			}
		}
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;

		tbp->b_flags &= ~B_ERROR;
		tbp->b_cmd = BUF_CMD_WRITE;

		cluster_append(&bp->b_bio1, tbp);

		/*
		 * check for latent dependencies to be handled
		 */
		if (LIST_FIRST(&tbp->b_dep) != NULL)
			buf_start(tbp);
	}

	pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages,
		    bp->b_xio.xio_npages);
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_wbuild: b_bufsize(%d) "
		      "> b_kvasize(%d)\n",
		      bp->b_bufsize, bp->b_kvasize);
	}
	totalwritten += bp->b_bufsize;
	bp->b_dirtyoff = 0;
	bp->b_dirtyend = bp->b_bufsize;
	bp->b_bio1.bio_done = cluster_callback;
	bp->b_cmd = BUF_CMD_WRITE;

	vfs_busy_pages(vp, bp);
	bsetrunningbufspace(bp, bp->b_bufsize);
	vn_strategy(vp, &bp->b_bio1);

	return totalwritten;
}
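
/*
 * Illustrative sketch (not part of the original source): pushing out a
 * range of delayed-write buffers with cluster_wbuild(), the same call
 * cluster_wbuild_wb() makes above when write-behind is active.
 */
static __inline int
cluster_wbuild_usage_example(struct vnode *vp, int blksize,
			     off_t start_loffset, int bytes)
{
	/* NULL bpp: no pre-locked buffer is being contributed */
	return (cluster_wbuild(vp, NULL, blksize, start_loffset, bytes));
}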
/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed-in.
 *
 * Only pre-existing buffers whose block size matches blksize are collected.
 * (this is primarily because HAMMER1 uses varying block sizes and we don't
 * want to override its choices).
 *
 * This code will not try to collect buffers that it cannot lock, otherwise
 * it might deadlock against SMP-friendly filesystems.
 */
static struct cluster_save *
cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
		    struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;

	len = (int)(cc->v_lastw - cc->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = cc->v_cstart, i = 0, j = 0;
	     i < len;
	     (loffset += blksize), i++) {
		bp = getcacheblk(vp, loffset,
				 last_bp->b_bcount, GETBLK_SZMATCH |
						    GETBLK_NOWAIT);
		buflist->bs_children[i] = bp;
		} else if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}

	for (k = 0; k < j; ++k) {
		if (buflist->bs_children[k]) {
			bqrelse(buflist->bs_children[k]);
			buflist->bs_children[k] = NULL;
		}
	}

	bcopy(buflist->bs_children + j,
	      buflist->bs_children + 0,
	      sizeof(buflist->bs_children[0]) * (i - j));

	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
static void
cluster_setram(struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}