/*
 * The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 * Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

/*
 * Cluster tracking cache - replaces the original vnode v_* fields which had
 * limited utility and were not MP safe.
 *
 * The cluster tracking cache is a simple 4-way set-associative non-chained
 * cache.  It is capable of tracking up to four zones separated by 1MB or
 * more per vnode.
 *
 * NOTE: We want this structure to be cache-line friendly so the iterator
 *	 is embedded rather than in a separate array.
 *
 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
 *	 For now we treat the values as heuristic but also self-consistent,
 *	 i.e. the values cannot be completely random and cannot be SMP-unsafe
 *	 or the cluster code might end up clustering non-contiguous buffers
 *	 at the wrong offsets.
 */
struct cluster_cache {
        struct vnode *vp;               /* vnode this entry tracks */
        u_int   locked;                 /* entry lock, via atomic_swap_int() */
        off_t   v_lastw;                /* last write (end) (write cluster) */
        off_t   v_cstart;               /* start block (beg) of cluster */
        off_t   v_lasta;                /* last allocation (end) */
        u_int   v_clen;                 /* length of current cluster */
        u_int   iterator;               /* 4-way set iterator */
};

typedef struct cluster_cache cluster_cache_t;

#define CLUSTER_CACHE_SIZE      512
#define CLUSTER_CACHE_MASK      (CLUSTER_CACHE_SIZE - 1)

#define CLUSTER_ZONE            ((off_t)(1024 * 1024))

cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
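
/*
 * Illustrative sketch (not part of the original file): how a (vp, loffset)
 * pair selects a 4-entry set and how entries within that set are compared
 * by 1MB zone.  This mirrors the hash and zone test used by
 * cluster_getcache() below; the helper name example_cluster_set is
 * hypothetical.
 */
#if 0
static cluster_cache_t *
example_cluster_set(struct vnode *vp, off_t loffset, int *zone_match)
{
        cluster_cache_t *cc;
        size_t hv;
        int i;

        /* hash the vnode pointer, keep the index 4-aligned for the set */
        hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
        hv &= CLUSTER_CACHE_MASK & ~3;
        cc = &cluster_array[hv];

        /* an entry matches when it tracks the same 1MB zone as loffset */
        *zone_match = -1;
        for (i = 0; i < 4; ++i) {
                if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
                        *zone_match = i;
                        break;
                }
        }
        return cc;
}
#endif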

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
        cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
                                struct buf *last_bp, int blksize);
static struct buf *
        cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
                                off_t doffset, int blksize, int run,
                                struct buf *fbp, int *srp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static void cluster_clrram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
                                off_t start_loffset, int bytes);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
    &write_behind_minfilesize, 0, "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t bogus_page;

/*
 * nblks is our cluster_rbuild request size.  The approximate number of
 * physical read-ahead requests is maxra / nblks.  The physical request
 * size is limited by the device (maxrbuild).  We also do not want to make
 * the request size too big or it will mess up the B_RAM streaming.
 */
static int
calc_rbuild_reqsize(int maxra, int maxrbuild)
{
        int nblks;

        if ((nblks = maxra / 4) > maxrbuild)
                nblks = maxrbuild;
        /* ... */
        return nblks;
}
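
/*
 * Illustrative arithmetic (not from the original source): with maxra = 128
 * blocks of desired read-ahead and a device limit of maxrbuild = 32 blocks
 * per physical I/O, nblks = maxra / 4 = 32, so the read-ahead is issued as
 * roughly maxra / nblks = 4 overlapping physical requests.
 */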

/*
 * Acquire/release cluster cache (can return dummy entry)
 */
static cluster_cache_t *
cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
{
        cluster_cache_t *cc;
        size_t hv;
        int i;
        int xact;

        hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
        hv &= CLUSTER_CACHE_MASK & ~3;
        cc = &cluster_array[hv];

        /*
         * Scan the four entries in the set for a zone match.
         */
        xact = -1;
        for (i = 0; i < 4; ++i) {
                if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
                        xact = i;
                        break;
                }
        }

        /*
         * Lock the matching entry and re-check it, it may have been
         * ripped out from under us by another cpu.
         */
        if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
                if (cc[xact].vp == vp &&
                    ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
                        return(&cc[xact]);
                }
                atomic_swap_int(&cc[xact].locked, 0);
        }

        /*
         * New entry.  If we can't acquire the cache line then use the
         * passed-in dummy element and reset all fields.
         *
         * When we are able to acquire the cache line we only clear the
         * fields if the vp does not match.  This allows us to multi-zone
         * a vp and for excessive zones / partial clusters to be retired.
         */
        i = cc->iterator++ & 3;
        cc += i;
        if (atomic_swap_int(&cc->locked, 1) != 0) {
                cc = dummy;
                cc->locked = 1;
                cc->vp = NULL;
        }
        /* ... reset the tracking fields and return the (possibly dummy) entry */
        return(cc);
}

/*
 * Release a cluster cache entry acquired with cluster_getcache().
 */
static void
cluster_putcache(cluster_cache_t *cc)
{
        atomic_swap_int(&cc->locked, 0);
}
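
/*
 * Illustrative usage sketch (not part of the original file): callers pair
 * cluster_getcache() with cluster_putcache() around their use of the
 * tracking fields, transparently falling back to the on-stack dummy entry
 * when the set entry is contended (this is what cluster_write() does below).
 */
#if 0
        cluster_cache_t dummy;
        cluster_cache_t *cc;

        cc = cluster_getcache(&dummy, vp, loffset);
        /* ... read/update cc->v_cstart, cc->v_clen, cc->v_lastw ... */
        cluster_putcache(cc);
#endif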

/*
 * This replaces bread(), providing a synchronous read of the requested
 * buffer plus asynchronous read-ahead within the specified bounds.
 *
 * The caller may pre-populate *bpp if it already has the requested buffer
 * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
              int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
        struct buf *bp, *rbp, *reqbp;
        off_t origoffset;
        off_t doffset;
        int error;
        int i;
        int maxra;
        int maxrbuild;
        int sr;
        int nblks;
        int burstbytes;

        /*
         * Calculate the desired read-ahead in blksize'd blocks (maxra).
         * To do this we calculate maxreq.
         *
         * maxreq typically starts out as a sequential heuristic.  If the
         * high level uio/resid is bigger (minreq), we pop maxreq up to
         * minreq.  This represents the case where random I/O is being
         * performed by userland issuing big read()'s.
         *
         * Then we limit maxreq to max_readahead to ensure it is a reasonable
         * value.
         *
         * Finally we must ensure that (loffset + maxreq) does not cross the
         * boundary (filesize) for the current blocksize.  If we allowed it
         * to cross we could end up with buffers past the boundary with the
         * wrong block size (HAMMER large-data areas use mixed block sizes).
         * minreq is also absolutely limited to filesize.
         */
        if (maxreq < minreq)
                maxreq = minreq;
        /* minreq not used beyond this point */

        if (maxreq > max_readahead) {
                maxreq = max_readahead;
                if (maxreq > 16 * 1024 * 1024)
                        maxreq = 16 * 1024 * 1024;
        }
        if (maxreq < blksize)
                maxreq = blksize;
        if (loffset + maxreq > filesize) {
                if (loffset > filesize)
                        maxreq = 0;
                else
                        maxreq = filesize - loffset;
        }

        maxra = (int)(maxreq / blksize);
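
        /*
         * Illustrative example (not from the original source): with the
         * default vfs.max_readahead of 2MB and blksize = 16384, a large
         * sequential read clamps maxreq to 2MB and yields
         * maxra = 2097152 / 16384 = 128 blocks of desired read-ahead.
         */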

        /*
         * Get the requested block.
         */
        *bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
        origoffset = loffset;

        /*
         * Calculate the maximum cluster size for a single I/O, used
         * by cluster_rbuild().
         */
        maxrbuild = vmaxiosize(vp) / blksize;

        /*
         * If it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                /*
                 * Not sequential, do not do any read-ahead
                 */
                if (maxra <= 1)
                        return 0;

                /*
                 * No read-ahead mark, do not do any read-ahead
                 */
                if ((bp->b_flags & B_RAM) == 0)
                        return 0;

                /*
                 * We hit a read-ahead-mark, figure out how much read-ahead
                 * to do (maxra) and where to start (loffset).
                 *
                 * Typically the way this works is that B_RAM is set in the
                 * middle of the cluster and triggers an overlapping
                 * read-ahead of 1/2 a cluster more blocks.  This ensures
                 * that the cluster read-ahead scales with the read-ahead
                 * count and is thus better able to absorb the caller's
                 * ...
                 *
                 * Estimate where the next unread block will be by assuming
                 * that the B_RAM's are placed at the half-way point.
                 */
                bp->b_flags &= ~B_RAM;
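
                /*
                 * Illustrative example (not from the original source): with
                 * maxra = 32, B_RAM is typically hit near the middle of the
                 * previously issued cluster, so roughly maxra/2 = 16 blocks
                 * are still cached or in-progress ahead of the caller while
                 * the next read-ahead is started.
                 */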

                /*
                 * Scan from the estimated half-way point to find where the
                 * cached run actually ends (i indexes blocks past loffset).
                 */
                rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
                if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
                        /* ... */
                        rbp = findblk(vp, loffset + i * blksize,
                                      FINDBLK_TEST);
                        /* ... */
                }
                /* ... */
                rbp = findblk(vp, loffset + i * blksize,
                              FINDBLK_TEST);
                /* ... */

                /*
                 * We got everything or everything is in the cache, no
                 * ...
                 */
                /* ... */

                /*
                 * Calculate where to start the read-ahead and how much
                 * to do.  Generally speaking we want to read-ahead by
                 * (maxra) when we've found a read-ahead mark.  We do
                 * not want to reduce maxra here as it will cause
                 * successive read-ahead I/O's to be smaller and smaller.
                 *
                 * However, we have to make sure we don't break the
                 * filesize limitation for the clustered operation.
                 */
                loffset += i * blksize;
                /* ... */
                if (loffset >= filesize)
                        return 0;
                if (loffset + maxra * blksize > filesize) {
                        maxreq = filesize - loffset;
                        maxra = (int)(maxreq / blksize);
                }

                /*
                 * Set RAM on first read-ahead block since we still have
                 * approximate maxra/2 blocks ahead of us that are already
                 * cached or in-progress.
                 */
                /* ... */
        } else {
                /*
                 * Start block is not valid, we will want to do a
                 * ...
                 */
                __debugvar off_t firstread = bp->b_loffset;

                /*
                 * Set-up synchronous read for bp.
                 */
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio1.bio_done = biodone_sync;
                bp->b_bio1.bio_flags |= BIO_SYNC;

                KASSERT(firstread != NOOFFSET,
                        ("cluster_read: no buffer offset"));

                nblks = calc_rbuild_reqsize(maxra, maxrbuild);

                /*
                 * Set RAM half-way through the full-cluster.
                 */
                sr = (maxra + 1) / 2;

                error = VOP_BMAP(vp, loffset, &doffset,
                                 &burstbytes, NULL, BUF_CMD_READ);
                if (error)
                        goto single_block_read;
                if (nblks > burstbytes / blksize)
                        nblks = burstbytes / blksize;
                if (doffset == NOOFFSET)
                        goto single_block_read;
                if (nblks <= 1)
                        goto single_block_read;

                bp = cluster_rbuild(vp, filesize, loffset,
                                    doffset, blksize, nblks, bp, &sr);
                loffset += bp->b_bufsize;
                maxra -= bp->b_bufsize / blksize;
                /* ... */

                /*
                 * If it isn't in the cache, then get a chunk from
                 * disk if sequential, otherwise just get the block.
                 */
                /* ... */
        }

        /*
         * If B_CACHE was not set issue bp.  bp will either be an
         * asynchronous cluster buf or a synchronous single-buf.
         * If it is a single buf it will be the same as reqbp.
         *
         * NOTE: Once an async cluster buf is issued bp becomes invalid.
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("S(%012jx,%d,%d)\n",
                                (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, bp);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                vn_strategy(vp, &bp->b_bio1);
                /* bp invalid now */
        }

#if defined(CLUSTERDEBUG)
        if (rcluster)
                kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
                        loffset, blksize, maxra, sr);
#endif

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         * The code above us should have positioned us at the next likely
         * ...
         *
         * Only mess with buffers which we can immediately lock.  HAMMER
         * will do device-readahead irrespective of what the blocks
         * ...
         *
         * Set B_RAM on the first buffer (the next likely offset needing
         * read-ahead), under the assumption that there are still
         * approximately maxra/2 blocks good ahead of us.
         */
        while (maxra > 0) {
                rbp = getblk(vp, loffset, blksize,
                             GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("read-ahead %016jx rbp=%p ", /* ... */);
#endif
                /* ... */
                if ((rbp->b_flags & B_CACHE)) {
                        /* ... */
                }

                /*
                 * If BMAP is not supported or has an issue, we still do
                 * (maxra) read-ahead, but we do not try to use rbuild.
                 */
                error = VOP_BMAP(vp, loffset, &doffset,
                                 &burstbytes, NULL, BUF_CMD_READ);
                if (error || doffset == NOOFFSET) {
                        /* ... */
                } else {
                        nblks = calc_rbuild_reqsize(maxra, maxrbuild);
                        if (nblks > burstbytes / blksize)
                                nblks = burstbytes / blksize;
                }
                rbp->b_cmd = BUF_CMD_READ;
                /* ... */
                rbp = cluster_rbuild(vp, filesize, loffset,
                                     /* ... */);
                /* ... */
                rbp->b_bio2.bio_offset = doffset;
                /* ... */
                rbp->b_flags &= ~(B_ERROR|B_INVAL);

                if ((rbp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, rbp);
                loffset += rbp->b_bufsize;
                maxra -= rbp->b_bufsize / blksize;
                vn_strategy(vp, &rbp->b_bio1);
                /* rbp invalid now */
        }

        /*
         * Wait for our original buffer to complete its I/O.  reqbp will
         * be NULL if the original buffer was B_CACHE.  We are returning
         * (*bpp) which is the same as reqbp when reqbp != NULL.
         */
        if (reqbp) {
                KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
                error = biowait(&reqbp->b_bio1, "clurd");
        } else {
                error = 0;
        }
        return error;
}
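
/*
 * Illustrative usage sketch (not part of the original file): a filesystem
 * read path normally reaches cluster_readx() through the cluster_read()
 * wrapper mentioned above.  The names fs_blksize, seq_bytes, boff and n
 * below are hypothetical placeholders for filesystem-specific values.
 *
 *	struct buf *bp = NULL;
 *	int error;
 *
 *	error = cluster_read(vp, vp->v_filesize, loffset, fs_blksize,
 *			     uio->uio_resid, seq_bytes, &bp);
 *	if (error == 0)
 *		error = uiomove(bp->b_data + boff, n, uio);
 *	bqrelse(bp);
 */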

/*
 * This replaces breadcb(), providing an asynchronous read of the requested
 * buffer with a callback, plus an asynchronous read-ahead within the
 * specified bounds.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
void
cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
               int blksize, size_t minreq, size_t maxreq,
               void (*func)(struct bio *), void *arg)
{
        struct buf *bp, *rbp, *reqbp;
        off_t origoffset;
        off_t doffset;
        int error;
        int i;
        int maxra;
        int maxrbuild;
        int sr;
        int nblks;
        int burstbytes;

        /*
         * Calculate the desired read-ahead in blksize'd blocks (maxra).
         * To do this we calculate maxreq.
         *
         * maxreq typically starts out as a sequential heuristic.  If the
         * high level uio/resid is bigger (minreq), we pop maxreq up to
         * minreq.  This represents the case where random I/O is being
         * performed by userland issuing big read()'s.
         *
         * Then we limit maxreq to max_readahead to ensure it is a reasonable
         * value.
         *
         * Finally we must ensure that (loffset + maxreq) does not cross the
         * boundary (filesize) for the current blocksize.  If we allowed it
         * to cross we could end up with buffers past the boundary with the
         * wrong block size (HAMMER large-data areas use mixed block sizes).
         * minreq is also absolutely limited to filesize.
         */
        if (maxreq < minreq)
                maxreq = minreq;
        /* minreq not used beyond this point */

        if (maxreq > max_readahead) {
                maxreq = max_readahead;
                if (maxreq > 16 * 1024 * 1024)
                        maxreq = 16 * 1024 * 1024;
        }
        if (maxreq < blksize)
                maxreq = blksize;
        if (loffset + maxreq > filesize) {
                if (loffset > filesize)
                        maxreq = 0;
                else
                        maxreq = filesize - loffset;
        }

        maxra = (int)(maxreq / blksize);

        /*
         * Get the requested block.
         */
        reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
        origoffset = loffset;

        /*
         * Calculate the maximum cluster size for a single I/O, used
         * by cluster_rbuild().
         */
        maxrbuild = vmaxiosize(vp) / blksize;

        /*
         * If it is in the cache, then check to see if the reads have been
         * sequential.  If they have, then try some read-ahead, otherwise
         * back-off on prospective read-aheads.
         */
        if (bp->b_flags & B_CACHE) {
                /*
                 * Setup for func() call whether we do read-ahead or not.
                 */
                bp->b_bio1.bio_caller_info1.ptr = arg;
                bp->b_bio1.bio_flags |= BIO_DONE;

                /*
                 * Not sequential, do not do any read-ahead
                 */
                if (maxra <= 1)
                        /* ... */;

                /*
                 * No read-ahead mark, do not do any read-ahead
                 */
                if ((bp->b_flags & B_RAM) == 0)
                        /* ... */;
                bp->b_flags &= ~B_RAM;

                /*
                 * We hit a read-ahead-mark, figure out how much read-ahead
                 * to do (maxra) and where to start (loffset).
                 *
                 * Shortcut the scan.  Typically the way this works is that
                 * we've built up all the blocks in between except for the
                 * last in previous iterations, so if the second-to-last
                 * block is present we just skip ahead to it.
                 *
                 * This algorithm has O(1) cpu in the steady state no
                 * matter how large maxra is.
                 */
                if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
                        i = maxra - 1;
                else
                        i = 1;
                while (i < maxra) {
                        if (findblk(vp, loffset + i * blksize,
                                    FINDBLK_TEST) == NULL) {
                                break;
                        }
                        ++i;
                }

                /*
                 * We got everything or everything is in the cache, no
                 * ...
                 */
                /* ... */

                /*
                 * Calculate where to start the read-ahead and how much
                 * to do.  Generally speaking we want to read-ahead by
                 * (maxra) when we've found a read-ahead mark.  We do
                 * not want to reduce maxra here as it will cause
                 * successive read-ahead I/O's to be smaller and smaller.
                 *
                 * However, we have to make sure we don't break the
                 * filesize limitation for the clustered operation.
                 */
                loffset += i * blksize;
                /* leave reqbp intact to force function callback */

                if (loffset >= filesize)
                        /* ... */;
                if (loffset + maxra * blksize > filesize) {
                        maxreq = filesize - loffset;
                        maxra = (int)(maxreq / blksize);
                }
                /* ... */
        } else {
                /*
                 * bp is not valid, no prior cluster in progress so get a
                 * full cluster read-ahead going.
                 */
                __debugvar off_t firstread = bp->b_loffset;

                /*
                 * Set-up the read for bp; it completes asynchronously
                 * through the func() callback.
                 */
                bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
                bp->b_cmd = BUF_CMD_READ;
                bp->b_bio1.bio_done = func;
                bp->b_bio1.bio_caller_info1.ptr = arg;
                reqbp = NULL;   /* don't func() reqbp, it's running async */

                KASSERT(firstread != NOOFFSET,
                        ("cluster_read: no buffer offset"));

                /*
                 * nblks is our cluster_rbuild request size, limited
                 * primarily by the device.
                 */
                nblks = calc_rbuild_reqsize(maxra, maxrbuild);

                /*
                 * Set RAM half-way through the full-cluster.
                 */
                sr = (maxra + 1) / 2;

                error = VOP_BMAP(vp, loffset, &doffset,
                                 &burstbytes, NULL, BUF_CMD_READ);
                if (error)
                        goto single_block_read;
                if (nblks > burstbytes / blksize)
                        nblks = burstbytes / blksize;
                if (doffset == NOOFFSET)
                        goto single_block_read;
                if (nblks <= 1)
                        goto single_block_read;

                bp = cluster_rbuild(vp, filesize, loffset,
                                    doffset, blksize, nblks, bp, &sr);
                loffset += bp->b_bufsize;
                maxra -= bp->b_bufsize / blksize;
                /* ... */

                /*
                 * If it isn't in the cache, then get a chunk from
                 * disk if sequential, otherwise just get the block.
                 */
                /* ... */
        }

        /*
         * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
         * bp will either be an asynchronous cluster buf or an asynchronous
         * single-buf.
         *
         * NOTE: Once an async cluster buf is issued bp becomes invalid.
         */
        if (bp) {
#if defined(CLUSTERDEBUG)
                if (rcluster)
                        kprintf("S(%012jx,%d,%d)\n",
                                (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
                if ((bp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, bp);
                bp->b_flags &= ~(B_ERROR|B_INVAL);
                vn_strategy(vp, &bp->b_bio1);
                /* bp invalid now */
        }

#if defined(CLUSTERDEBUG)
        if (rcluster)
                kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
                        loffset, blksize, maxra, sr);
#endif

        /*
         * If we have been doing sequential I/O, then do some read-ahead.
         * The code above us should have positioned us at the next likely
         * ...
         *
         * Only mess with buffers which we can immediately lock.  HAMMER
         * will do device-readahead irrespective of what the blocks
         * ...
         */
        while (maxra > 0) {
                rbp = getblk(vp, loffset, blksize,
                             GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                /* ... */
                if ((rbp->b_flags & B_CACHE)) {
                        /* ... */
                }

                /*
                 * If BMAP is not supported or has an issue, we still do
                 * (maxra) read-ahead, but we do not try to use rbuild.
                 */
                error = VOP_BMAP(vp, loffset, &doffset,
                                 &burstbytes, NULL, BUF_CMD_READ);
                if (error || doffset == NOOFFSET) {
                        /* ... */
                } else {
                        nblks = calc_rbuild_reqsize(maxra, maxrbuild);
                        if (nblks > burstbytes / blksize)
                                nblks = burstbytes / blksize;
                }
                rbp->b_cmd = BUF_CMD_READ;
                /* ... */
                rbp = cluster_rbuild(vp, filesize, loffset,
                                     /* ... */);
                /* ... */
                rbp->b_bio2.bio_offset = doffset;
                /* ... */
                rbp->b_flags &= ~(B_ERROR|B_INVAL);

                if ((rbp->b_flags & B_CLUSTER) == 0)
                        vfs_busy_pages(vp, rbp);
                loffset += rbp->b_bufsize;
                maxra -= rbp->b_bufsize / blksize;
                vn_strategy(vp, &rbp->b_bio1);
                /* rbp invalid now */
        }

        /*
         * If reqbp is non-NULL it had B_CACHE set and we issue the
         * function callback synchronously.
         *
         * Note that we may start additional asynchronous I/O before doing
         * the func() callback for the B_CACHE case.
         */
        if (reqbp)
                func(&reqbp->b_bio1);
}
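
/*
 * Illustrative callback sketch (not part of the original file) following
 * the contract documented above cluster_readcb(): issue bpdone() if the
 * BIO has not completed yet, clear BIO_DONE, then dispose of the buffer.
 * The function name example_readcb_done is hypothetical.
 */
#if 0
static void
example_readcb_done(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;

        if ((bio->bio_flags & BIO_DONE) == 0)
                bpdone(bp, 0);
        bio->bio_flags &= ~BIO_DONE;
        /* ... consume bp->b_data as needed ... */
        bqrelse(bp);
}
#endif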

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 *
 * (*srp) counts down original blocks to determine where B_RAM should be set.
 * Set B_RAM when *srp drops to 0.  If (*srp) starts at 0, B_RAM will not be
 * set on any buffer.  Make sure B_RAM is cleared on any other buffers to
 * prevent degenerate read-aheads from being generated.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
               int blksize, int run, struct buf *fbp, int *srp)
{
        struct buf *bp, *tbp;
        off_t boffset;
        int i, j;
        int maxiosize = vmaxiosize(vp);

        /*
         * Don't allow the cluster to run past the file EOF boundary.
         */
        while (loffset + run * blksize > filesize) {
                --run;
        }

        tbp = fbp;
        tbp->b_bio2.bio_offset = doffset;
        if ((tbp->b_flags & B_MALLOC) ||
            ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
                return tbp;
        }

        /*
         * Get a pbuf, limit cluster I/O on a per-device basis.  If
         * doing cluster I/O for a file, limit cluster I/O on a
         * per-mount basis.
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                bp = trypbuf_kva(&vp->v_pbuf_count);
        else
                bp = trypbuf_kva(&vp->v_mount->mnt_pbuf_count);
        if (bp == NULL)
                return tbp;

        /*
         * We are synthesizing a buffer out of vm_page_t's, but
         * if the block size is not page aligned then the starting
         * address may not be either.  Inherit the b_data offset
         * from the original buffer.
         */
        bp->b_data = (char *)((vm_offset_t)bp->b_data |
                              ((vm_offset_t)tbp->b_data & PAGE_MASK));
        bp->b_flags |= B_CLUSTER | B_VMIO;
        bp->b_cmd = BUF_CMD_READ;
        bp->b_bio1.bio_done = cluster_callback;         /* default to async */
        bp->b_bio1.bio_caller_info1.cluster_head = NULL;
        bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
        bp->b_loffset = loffset;
        bp->b_bio2.bio_offset = doffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_rbuild: no buffer offset"));

        bp->b_bcount = 0;
        bp->b_bufsize = 0;
        bp->b_xio.xio_npages = 0;

        for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
                if (i) {
                        if ((bp->b_xio.xio_npages * PAGE_SIZE) +
                            round_page(blksize) > maxiosize) {
                                break;
                        }

                        /*
                         * Shortcut some checks and try to avoid buffers that
                         * would block in the lock.  The same checks have to
                         * be made again after we officially get the buffer.
                         */
                        tbp = getblk(vp, loffset + i * blksize, blksize,
                                     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
                        if (tbp == NULL)
                                break;
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                /* ... */
                        }

                        /*
                         * Stop scanning if the buffer is fully valid
                         * (marked B_CACHE), or locked (may be doing a
                         * background write), or if the buffer is not
                         * VMIO backed.  The clustering code can only deal
                         * with VMIO-backed buffers.
                         */
                        if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
                            (tbp->b_flags & B_VMIO) == 0 ||
                            (LIST_FIRST(&tbp->b_dep) != NULL &&
                             buf_checkwrite(tbp))) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * The buffer must be completely invalid in order to
                         * take part in the cluster.  If it is partially valid
                         * ...
                         */
                        for (j = 0; j < tbp->b_xio.xio_npages; j++) {
                                if (tbp->b_xio.xio_pages[j]->valid)
                                        break;
                        }
                        if (j != tbp->b_xio.xio_npages) {
                                bqrelse(tbp);
                                break;
                        }

                        /*
                         * Depress the priority of buffers not explicitly
                         * ...
                         */
                        /* tbp->b_flags |= B_AGE; */
                }

                /*
                 * Set the block number if it isn't set, otherwise
                 * if it is make sure it matches the block number we
                 * ...
                 */
                if (tbp->b_bio2.bio_offset == NOOFFSET) {
                        tbp->b_bio2.bio_offset = boffset;
                } else if (tbp->b_bio2.bio_offset != boffset) {
                        /* ... */
                }

                /*
                 * Set B_RAM if (*srp) is 1.  B_RAM is only set on one buffer
                 * in the cluster, including potentially the first buffer
                 * once we start streaming the read-aheads.
                 */
                if (*srp == 1)
                        cluster_setram(tbp);
                else
                        cluster_clrram(tbp);

                /*
                 * The passed-in tbp (i == 0) will already be set up for
                 * async or sync operation.  All other tbp's acquired in
                 * our loop are set up for async operation.
                 */
                tbp->b_cmd = BUF_CMD_READ;
                cluster_append(&bp->b_bio1, tbp);

                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                        vm_page_t m;

                        m = tbp->b_xio.xio_pages[j];
                        vm_page_busy_wait(m, FALSE, "clurpg");
                        vm_page_io_start(m);
                        vm_page_wakeup(m);
                        vm_object_pip_add(m->object, 1);
                        if ((bp->b_xio.xio_npages == 0) ||
                            (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                bp->b_xio.xio_npages++;
                        }
                        if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
                                tbp->b_xio.xio_pages[j] = bogus_page;
                                tbp->b_flags |= B_HASBOGUS;
                        }
                }

                /*
                 * XXX shouldn't this be += size for both, like in
                 * cluster_wbuild()?
                 *
                 * Don't inherit tbp->b_bufsize as it may be larger due to
                 * a non-page-aligned size.  Instead just aggregate using
                 * blksize.
                 */
                if (tbp->b_bcount != blksize)
                        kprintf("warning: tbp->b_bcount wrong %d vs %d\n",
                                tbp->b_bcount, blksize);
                if (tbp->b_bufsize != blksize)
                        kprintf("warning: tbp->b_bufsize wrong %d vs %d\n",
                                tbp->b_bufsize, blksize);
                bp->b_bcount += blksize;
                bp->b_bufsize += blksize;
        }

        /*
         * Fully valid pages in the cluster are already good and do not need
         * to be re-read from disk.  Replace the page with bogus_page
         */
        for (j = 0; j < bp->b_xio.xio_npages; j++) {
                if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
                    VM_PAGE_BITS_ALL) {
                        bp->b_xio.xio_pages[j] = bogus_page;
                        bp->b_flags |= B_HASBOGUS;
                }
        }
        if (bp->b_bufsize > bp->b_kvasize) {
                panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
                      bp->b_bufsize, bp->b_kvasize);
        }
        pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
                    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);

        return bp;
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct buf *tbp;
        struct vnode *vp;
        int error = 0;

        /*
         * Must propagate errors to all the components.  A short read (EOF)
         * is a critical error.
         */
        if (bp->b_flags & B_ERROR) {
                error = bp->b_error;
        } else if (bp->b_bcount != bp->b_bufsize) {
                panic("cluster_callback: unexpected EOF on cluster %p!", bio);
        }

        pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
                     bp->b_xio.xio_npages);

        /*
         * Move memory from the large cluster buffer into the component
         * buffers and mark IO as done on these.  Since the memory map
         * is the same, no actual copying is required.
         */
        while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
                bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
                if (error) {
                        tbp->b_flags |= B_ERROR | B_IOISSUED;
                        tbp->b_error = error;
                } else {
                        tbp->b_dirtyoff = tbp->b_dirtyend = 0;
                        tbp->b_flags &= ~(B_ERROR|B_INVAL);
                        tbp->b_flags |= B_IOISSUED;

                        /*
                         * XXX the bdwrite()/bqrelse() issued during
                         * cluster building clears B_RELBUF (see bqrelse()
                         * comment).  If direct I/O was specified, we have
                         * to restore it here to allow the buffer and VM
                         * ...
                         */
                        if (tbp->b_flags & B_DIRECT)
                                tbp->b_flags |= B_RELBUF;

                        /*
                         * XXX I think biodone() below will do this, but do
                         * it here anyway for consistency.
                         */
                        if (tbp->b_cmd == BUF_CMD_WRITE)
                                bundirty(tbp);
                }
                biodone(&tbp->b_bio1);
        }

        vp = bp->b_vp;
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                relpbuf(bp, &vp->v_pbuf_count);
        else
                relpbuf(bp, &vp->v_mount->mnt_pbuf_count);
}

/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 *
 * In addition, write_behind is only activated for files that have
 * grown past a certain size (default 10MB).  Otherwise temporary files
 * wind up generating a lot of unnecessary disk I/O.
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
        int r = 0;

        switch(write_behind) {
        case 2:
                if (start_loffset < len)
                        break;
                start_loffset -= len;
                /* fall through */
        case 1:
                if (vp->v_filesize >= write_behind_minfilesize) {
                        r = cluster_wbuild(vp, NULL, blksize,
                                           start_loffset, len);
                }
                break;
        default:
                break;
        }
        return(r);
}
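
/*
 * Illustrative note (not part of the original file): the behavior above is
 * tunable at runtime through the sysctls defined earlier in this file,
 * for example:
 *
 *	sysctl vfs.write_behind=2
 *	sysctl vfs.write_behind_minfilesize=33554432
 *
 * which backs off write-behind and only applies it to files that have
 * grown past the given size in bytes.
 */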

/*
 * Do clustered write for FFS.
 *
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 *
 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
        struct vnode *vp;
        off_t loffset;
        int maxclen, cursize;
        int async;
        cluster_cache_t dummy;
        cluster_cache_t *cc;

        vp = bp->b_vp;
        if (vp->v_type == VREG)
                async = vp->v_mount->mnt_flag & MNT_ASYNC;
        else
                async = 0;
        loffset = bp->b_loffset;
        KASSERT(bp->b_loffset != NOOFFSET,
                ("cluster_write: no buffer offset"));

        cc = cluster_getcache(&dummy, vp, loffset);

        /*
         * Initialize vnode to beginning of file.
         */
        if (loffset == 0)
                cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;

        if (cc->v_clen == 0 || loffset != cc->v_lastw ||
            (bp->b_bio2.bio_offset != NOOFFSET &&
             (bp->b_bio2.bio_offset != cc->v_lasta))) {
                /*
                 * Next block is not logically sequential, or, if physical
                 * block offsets are available, not physically sequential.
                 *
                 * If physical block offsets are not available we only
                 * get here if we weren't logically sequential.
                 */
                maxclen = vmaxiosize(vp);
                if (cc->v_clen != 0) {
                        /*
                         * Next block is not sequential.
                         *
                         * If we are not writing at end of file, the process
                         * seeked to another point in the file since its last
                         * write, or we have reached our maximum cluster size,
                         * then push the previous cluster.  Otherwise try
                         * reallocating to make it sequential.
                         *
                         * Change to algorithm: only push previous cluster if
                         * it was sequential from the point of view of the
                         * seqcount heuristic, otherwise leave the buffer
                         * intact so we can potentially optimize the I/O
                         * later on in the buf_daemon or update daemon
                         * flush.
                         */
                        cursize = cc->v_lastw - cc->v_cstart;
                        if (bp->b_loffset + blksize < filesize ||
                            loffset != cc->v_lastw ||
                            cc->v_clen <= cursize) {
                                if (!async && seqcount > 0) {
                                        cluster_wbuild_wb(vp, blksize,
                                                cc->v_cstart, cursize);
                                }
                        } else {
                                struct buf **bpp, **endbp;
                                struct cluster_save *buflist;

                                buflist = cluster_collectbufs(cc, vp,
                                                              bp, blksize);
                                endbp = &buflist->bs_children
                                        [buflist->bs_nchildren - 1];
                                if (VOP_REALLOCBLKS(vp, buflist)) {
                                        /*
                                         * Failed, push the previous cluster
                                         * if *really* writing sequentially
                                         * in the logical file (seqcount > 1),
                                         * otherwise delay it in the hopes that
                                         * the low level disk driver can
                                         * optimize the write ordering.
                                         *
                                         * NOTE: We do not brelse the last
                                         *	 element which is bp, and we
                                         *	 do not return here.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp < endbp; bpp++)
                                                brelse(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        if (seqcount > 1) {
                                                cluster_wbuild_wb(vp,
                                                    blksize, cc->v_cstart,
                                                    cursize);
                                        }
                                } else {
                                        /*
                                         * Succeeded, keep building cluster.
                                         */
                                        for (bpp = buflist->bs_children;
                                             bpp <= endbp; bpp++)
                                                bdwrite(*bpp);
                                        kfree(buflist, M_SEGMENT);
                                        cc->v_lastw = loffset + blksize;
                                        cc->v_lasta = bp->b_bio2.bio_offset +
                                                      blksize;
                                        cluster_putcache(cc);
                                        return;
                                }
                        }
                }

                /*
                 * Consider beginning a cluster.  If at end of file, make
                 * cluster as large as possible, otherwise find size of
                 * existing cluster.
                 */
                if ((vp->v_type == VREG) &&
                    bp->b_loffset + blksize < filesize &&
                    (bp->b_bio2.bio_offset == NOOFFSET) &&
                    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
                              &maxclen, NULL, BUF_CMD_WRITE) ||
                     bp->b_bio2.bio_offset == NOOFFSET)) {
                        /* ... */
                        cc->v_lasta = bp->b_bio2.bio_offset + blksize;
                        cc->v_cstart = loffset;
                        cc->v_lastw = loffset + blksize;
                        cluster_putcache(cc);
                        return;
                }
                if (maxclen > blksize)
                        cc->v_clen = maxclen;
                else
                        cc->v_clen = blksize;
                if (!async && cc->v_clen == 0) {	/* I/O not contiguous */
                        cc->v_cstart = loffset;
                        bawrite(bp);
                } else {	/* Wait for rest of cluster */
                        cc->v_cstart = loffset;
                        bdwrite(bp);
                }
        } else if (loffset == cc->v_cstart + cc->v_clen) {
                /*
                 * At end of cluster, write it out if seqcount tells us we
                 * are operating sequentially, otherwise let the buf or
                 * update daemon handle it.
                 */
                /* ... */
                if (!async && seqcount > 0) {
                        cluster_wbuild_wb(vp, blksize, cc->v_cstart,
                                          cc->v_clen + blksize);
                }
                /* ... */
                cc->v_cstart = loffset;
        } else if (vm_page_count_severe() &&
                   bp->b_loffset + blksize < filesize) {
                /*
                 * We are low on memory, get it going NOW.  However, do not
                 * try to push out a partial block at the end of the file
                 * as this could lead to extremely non-optimal write activity.
                 */
                bawrite(bp);
        } else {
                /*
                 * In the middle of a cluster, so just delay the I/O for now.
                 */
                bdwrite(bp);
        }
        cc->v_lastw = loffset + blksize;
        cc->v_lasta = bp->b_bio2.bio_offset + blksize;
        cluster_putcache(cc);
}

/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
int
cluster_awrite(struct buf *bp)
{
        int total;

        /*
         * Don't bother if it isn't clusterable.
         */
        if ((bp->b_flags & B_CLUSTEROK) == 0 ||
            bp->b_vp == NULL ||
            (bp->b_vp->v_flag & VOBJBUF) == 0) {
                total = bp->b_bufsize;
                bawrite(bp);
                return total;
        }

        total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
                               bp->b_loffset, vmaxiosize(bp->b_vp));

        /*
         * If bp is still non-NULL then cluster_wbuild() did not initiate
         * I/O on it and we must do so here to provide the API guarantee.
         */
        if (bp)
                bawrite(bp);

        return total;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
               int blksize, off_t start_loffset, int bytes)
{
        struct buf *bp, *tbp;
        vm_page_t m;
        int i, j;
        int totalwritten = 0;
        int must_initiate;
        int maxiosize = vmaxiosize(vp);

        while (bytes > 0) {
                /*
                 * If the buffer matches the passed locked & removed buffer
                 * we used the passed buffer (which might not be B_DELWRI).
                 *
                 * Otherwise locate the buffer and determine if it is
                 * compatible.
                 */
                if (bpp && (*bpp)->b_loffset == start_loffset) {
                        tbp = *bpp;
                        *bpp = NULL;
                        bpp = NULL;
                } else {
                        tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
                        if (tbp == NULL ||
                            (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
                             B_DELWRI ||
                            (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
                                /* ... */
                                start_loffset += blksize;
                                bytes -= blksize;
                                continue;
                        }
                }
                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

                /*
                 * Extra memory in the buffer, punt on this buffer.
                 * XXX we could handle this in most cases, but we would
                 * have to push the extra memory down to after our max
                 * possible cluster size and then potentially pull it back
                 * up if the cluster was terminated prematurely--too much
                 * hassle.
                 */
                if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
                    (tbp->b_bcount != tbp->b_bufsize) ||
                    (tbp->b_bcount != blksize) ||
                    (bytes == blksize)) {
                        totalwritten += tbp->b_bufsize;
                        bawrite(tbp);
                        start_loffset += blksize;
                        bytes -= blksize;
                        continue;
                }

                /*
                 * Get a pbuf, limit cluster I/O on a per-device basis.  If
                 * doing cluster I/O for a file, limit cluster I/O on a
                 * per-mount basis.
                 *
                 * HAMMER and other filesystems may attempt to queue a massive
                 * amount of write I/O, using trypbuf() here easily results in
                 * a situation where the I/O stream becomes non-clustered.
                 */
                if (vp->v_type == VCHR || vp->v_type == VBLK)
                        bp = getpbuf_kva(&vp->v_pbuf_count);
                else
                        bp = getpbuf_kva(&vp->v_mount->mnt_pbuf_count);

                /*
                 * Set up the pbuf.  Track our append point with b_bcount
                 * and b_bufsize.  b_bufsize is not used by the device but
                 * our caller uses it to loop clusters and we use it to
                 * detect a premature EOF on the block device.
                 */
                bp->b_bcount = 0;
                bp->b_bufsize = 0;
                bp->b_xio.xio_npages = 0;
                bp->b_loffset = tbp->b_loffset;
                bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

                /*
                 * We are synthesizing a buffer out of vm_page_t's, but
                 * if the block size is not page aligned then the starting
                 * address may not be either.  Inherit the b_data offset
                 * from the original buffer.
                 */
                bp->b_data = (char *)((vm_offset_t)bp->b_data |
                                      ((vm_offset_t)tbp->b_data & PAGE_MASK));
                bp->b_flags &= ~B_ERROR;
                bp->b_flags |= B_CLUSTER | B_BNOCLIP |
                               (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
                bp->b_bio1.bio_caller_info1.cluster_head = NULL;
                bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

                /*
                 * From this location in the file, scan forward to see
                 * if there are buffers with adjacent data that need to
                 * be written as well.
                 *
                 * IO *must* be initiated on index 0 at this point
                 * (particularly when called from cluster_awrite()).
                 */
                for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
                        if (i) {
                                tbp = findblk(vp, start_loffset,
                                              FINDBLK_NBLOCK);
                                if (tbp == NULL) {
                                        /*
                                         * Buffer not found or could not be
                                         * locked non-blocking.
                                         */
                                        break;
                                }

                                /*
                                 * If it IS in core, but has different
                                 * characteristics, then don't cluster
                                 * with it.
                                 */
                                if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
                                     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
                                    != (B_DELWRI | B_CLUSTEROK |
                                     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
                                    (tbp->b_flags & B_LOCKED)) {
                                        /* ... */
                                        break;
                                }

                                /*
                                 * Check that the combined cluster
                                 * would make sense with regard to pages
                                 * and would not be too large
                                 *
                                 * WARNING! buf_checkwrite() must be the last
                                 *	    check made.  If it returns 0 then
                                 *	    we must initiate the I/O.
                                 */
                                if ((tbp->b_bcount != blksize) ||
                                    ((bp->b_bio2.bio_offset + i) !=
                                      tbp->b_bio2.bio_offset) ||
                                    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
                                      (maxiosize / PAGE_SIZE)) ||
                                    (LIST_FIRST(&tbp->b_dep) &&
                                     buf_checkwrite(tbp))) {
                                        /* ... */
                                        break;
                                }
                                if (LIST_FIRST(&tbp->b_dep))
                                        must_initiate = 1;

                                /*
                                 * Ok, it's passed all the tests,
                                 * so remove it from the free list
                                 * and mark it busy. We will use it.
                                 */
                                KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
                        }

                        /*
                         * If the IO is via the VM then we do some
                         * special VM hackery (yuck).  Since the buffer's
                         * block size may not be page-aligned it is possible
                         * for a page to be shared between two buffers.  We
                         * have to get rid of the duplication when building
                         * the cluster.
                         */
                        if (tbp->b_flags & B_VMIO) {
                                /*
                                 * Try to avoid deadlocks with the VM system.
                                 * However, we cannot abort the I/O if
                                 * must_initiate is non-zero.
                                 */
                                if (must_initiate == 0) {
                                        for (j = 0;
                                             j < tbp->b_xio.xio_npages;
                                             ++j) {
                                                m = tbp->b_xio.xio_pages[j];
                                                if (m->flags & PG_BUSY) {
                                                        /* ... */
                                                }
                                        }
                                }

                                for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
                                        m = tbp->b_xio.xio_pages[j];
                                        vm_page_busy_wait(m, FALSE, "clurpg");
                                        vm_page_io_start(m);
                                        vm_page_wakeup(m);
                                        vm_object_pip_add(m->object, 1);
                                        if ((bp->b_xio.xio_npages == 0) ||
                                            (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
                                                bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
                                                bp->b_xio.xio_npages++;
                                        }
                                }
                        }
                        bp->b_bcount += blksize;
                        bp->b_bufsize += blksize;

                        /*
                         * NOTE: see bwrite/bawrite code for why we no longer
                         *	 undirty the buffer here.
                         *
                         *	 bundirty(tbp); REMOVED
                         */
                        tbp->b_flags &= ~B_ERROR;
                        tbp->b_cmd = BUF_CMD_WRITE;
                        cluster_append(&bp->b_bio1, tbp);

                        /*
                         * check for latent dependencies to be handled
                         */
                        if (LIST_FIRST(&tbp->b_dep) != NULL)
                                buf_start(tbp);
                }
                pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
                            (vm_page_t *)bp->b_xio.xio_pages,
                            bp->b_xio.xio_npages);
                if (bp->b_bufsize > bp->b_kvasize) {
                        panic("cluster_wbuild: b_bufsize(%d) "
                              "> b_kvasize(%d)\n",
                              bp->b_bufsize, bp->b_kvasize);
                }
                totalwritten += bp->b_bufsize;
                bp->b_dirtyoff = 0;
                bp->b_dirtyend = bp->b_bufsize;
                bp->b_bio1.bio_done = cluster_callback;
                bp->b_cmd = BUF_CMD_WRITE;

                vfs_busy_pages(vp, bp);
                bsetrunningbufspace(bp, bp->b_bufsize);
                vn_strategy(vp, &bp->b_bio1);

                bytes -= i;
        }
        return totalwritten;
}
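
/*
 * Illustrative caller sketch (not part of the original file) for the *bpp
 * contract documented above cluster_wbuild(): if the passed buffer was not
 * absorbed into a cluster, *bpp is left intact and the caller still owns
 * the buffer and must start the I/O itself, which is what cluster_awrite()
 * does above.
 */
#if 0
        total = cluster_wbuild(vp, &bp, blksize, bp->b_loffset, vmaxiosize(vp));
        if (bp)                 /* not absorbed, still ours to issue */
                bawrite(bp);
#endif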

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed-in.
 *
 * Only pre-existing buffers whose block size matches blksize are collected.
 * (this is primarily because HAMMER1 uses varying block sizes and we don't
 * want to override its choices).
 *
 * This code will not try to collect buffers that it cannot lock, otherwise
 * it might deadlock against SMP-friendly filesystems.
 */
static struct cluster_save *
cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
                    struct buf *last_bp, int blksize)
{
        struct cluster_save *buflist;
        struct buf *bp;
        off_t loffset;
        int i, j, k, len;

        len = (int)(cc->v_lastw - cc->v_cstart) / blksize;
        buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
                          M_SEGMENT, M_WAITOK);
        buflist->bs_nchildren = 0;
        buflist->bs_children = (struct buf **) (buflist + 1);
        for (loffset = cc->v_cstart, i = 0, j = 0;
             i < len;
             (loffset += blksize), i++) {
                bp = getcacheblk(vp, loffset,
                                 last_bp->b_bcount, GETBLK_SZMATCH |
                                                    GETBLK_NOWAIT);
                buflist->bs_children[i] = bp;
                if (bp == NULL) {
                        j = i + 1;
                } else if (bp->b_bio2.bio_offset == NOOFFSET) {
                        VOP_BMAP(bp->b_vp, bp->b_loffset,
                                 &bp->b_bio2.bio_offset,
                                 NULL, NULL, BUF_CMD_WRITE);
                }
        }

        /*
         * Get rid of gaps
         */
        for (k = 0; k < j; ++k) {
                if (buflist->bs_children[k]) {
                        bqrelse(buflist->bs_children[k]);
                        buflist->bs_children[k] = NULL;
                }
        }
        if (j != 0) {
                bcopy(buflist->bs_children + j,
                      buflist->bs_children + 0,
                      sizeof(buflist->bs_children[0]) * (i - j));
                i -= j;
        }

        buflist->bs_children[i] = bp = last_bp;
        if (bp->b_bio2.bio_offset == NOOFFSET) {
                VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
                         NULL, NULL, BUF_CMD_WRITE);
        }
        buflist->bs_nchildren = i + 1;
        return (buflist);
}

static void
cluster_append(struct bio *bio, struct buf *tbp)
{
        tbp->b_cluster_next = NULL;
        if (bio->bio_caller_info1.cluster_head == NULL) {
                bio->bio_caller_info1.cluster_head = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        } else {
                bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
                bio->bio_caller_info2.cluster_tail = tbp;
        }
}
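
/*
 * Illustrative note (not part of the original file): cluster_callback()
 * consumes the singly-linked list built here by repeatedly removing
 * cluster_head, e.g.:
 *
 *	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
 *		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
 *		...
 *	}
 */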

static void
cluster_setram(struct buf *bp)
{
        bp->b_flags |= B_RAM;
        if (bp->b_xio.xio_npages)
                vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}

static void
cluster_clrram(struct buf *bp)
{
        bp->b_flags &= ~B_RAM;
        if (bp->b_xio.xio_npages)
                vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);
}