3 * The Regents of the University of California. All rights reserved.
4 * Modifications/enhancements:
5 * Copyright (c) 1995 John S. Dyson. All rights reserved.
6 * Copyright (c) 2012-2013 Matthew Dillon. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #include "opt_debug_cluster.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
40 #include <sys/vnode.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/resourcevar.h>
44 #include <sys/vmmeter.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <sys/sysctl.h>
51 #include <vm/vm_page2.h>
53 #include <machine/limits.h>
56 * Cluster tracking cache - replaces the original vnode v_* fields which had
57 * limited utility and were not MP safe.
59 * The cluster tracking cache is a simple 4-way set-associative non-chained
60 * cache. It is capable of tracking up to four zones separated by 1MB or
63 * NOTE: We want this structure to be cache-line friendly so the iterator
64 * is embedded rather than in a separate array.
66 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
67 * For now we treat the values as heuristical but also self-consistent.
68 * i.e. the values cannot be completely random and cannot be SMP unsafe
69 * or the cluster code might end-up clustering non-contiguous buffers
70 * at the wrong offsets.
72 struct cluster_cache
{
75 off_t v_lastw
; /* last write (end) (write cluster) */
76 off_t v_cstart
; /* start block (beg) of cluster */
77 off_t v_lasta
; /* last allocation (end) */
78 u_int v_clen
; /* length of current cluster */
82 typedef struct cluster_cache cluster_cache_t
;
84 #define CLUSTER_CACHE_SIZE 512
85 #define CLUSTER_CACHE_MASK (CLUSTER_CACHE_SIZE - 1)
87 #define CLUSTER_ZONE ((off_t)(1024 * 1024))
89 cluster_cache_t cluster_array
[CLUSTER_CACHE_SIZE
];
91 #if defined(CLUSTERDEBUG)
92 #include <sys/sysctl.h>
93 static int rcluster
= 0;
94 SYSCTL_INT(_debug
, OID_AUTO
, rcluster
, CTLFLAG_RW
, &rcluster
, 0, "");
97 static MALLOC_DEFINE(M_SEGMENT
, "cluster_save", "cluster_save buffer");
99 static struct cluster_save
*
100 cluster_collectbufs (cluster_cache_t
*cc
, struct vnode
*vp
,
101 struct buf
*last_bp
, int blksize
);
103 cluster_rbuild (struct vnode
*vp
, off_t filesize
, off_t loffset
,
104 off_t doffset
, int blksize
, int run
,
105 struct buf
*fbp
, int *srp
);
106 static void cluster_callback (struct bio
*);
107 static void cluster_setram (struct buf
*);
108 static void cluster_clrram (struct buf
*);
109 static int cluster_wbuild(struct vnode
*vp
, struct buf
**bpp
, int blksize
,
110 off_t start_loffset
, int bytes
);
112 static int write_behind
= 1;
113 SYSCTL_INT(_vfs
, OID_AUTO
, write_behind
, CTLFLAG_RW
, &write_behind
, 0,
114 "Cluster write-behind setting");
115 static quad_t write_behind_minfilesize
= 10 * 1024 * 1024;
116 SYSCTL_QUAD(_vfs
, OID_AUTO
, write_behind_minfilesize
, CTLFLAG_RW
,
117 &write_behind_minfilesize
, 0, "Cluster write-behind setting");
118 static int max_readahead
= 2 * 1024 * 1024;
119 SYSCTL_INT(_vfs
, OID_AUTO
, max_readahead
, CTLFLAG_RW
, &max_readahead
, 0,
120 "Limit in bytes for desired cluster read-ahead");
122 extern vm_page_t bogus_page
;
125 * nblks is our cluster_rbuild request size. The approximate number of
126 * physical read-ahead requests is maxra / nblks. The physical request
127 * size is limited by the device (maxrbuild). We also do not want to make
128 * the request size too big or it will mess up the B_RAM streaming.
132 calc_rbuild_reqsize(int maxra
, int maxrbuild
)
136 if ((nblks
= maxra
/ 4) > maxrbuild
)
144 * Acquire/release cluster cache (can return dummy entry)
148 cluster_getcache(cluster_cache_t
*dummy
, struct vnode
*vp
, off_t loffset
)
155 hv
= (size_t)(intptr_t)vp
^ (size_t)(intptr_t)vp
/ sizeof(*vp
);
156 hv
&= CLUSTER_CACHE_MASK
& ~3;
157 cc
= &cluster_array
[hv
];
160 for (i
= 0; i
< 4; ++i
) {
163 if (((cc
[i
].v_cstart
^ loffset
) & ~(CLUSTER_ZONE
- 1)) == 0) {
168 if (xact
>= 0 && atomic_swap_int(&cc
[xact
].locked
, 1) == 0) {
169 if (cc
[xact
].vp
== vp
&&
170 ((cc
[i
].v_cstart
^ loffset
) & ~(CLUSTER_ZONE
- 1)) == 0) {
173 atomic_swap_int(&cc
[xact
].locked
, 0);
177 * New entry. If we can't acquire the cache line then use the
178 * passed-in dummy element and reset all fields.
180 * When we are able to acquire the cache line we only clear the
181 * fields if the vp does not match. This allows us to multi-zone
182 * a vp and for excessive zones / partial clusters to be retired.
184 i
= cc
->iterator
++ & 3;
186 if (atomic_swap_int(&cc
->locked
, 1) != 0) {
203 cluster_putcache(cluster_cache_t
*cc
)
205 atomic_swap_int(&cc
->locked
, 0);
209 * This replaces bread(), providing a synchronous read of the requested
210 * buffer plus asynchronous read-ahead within the specified bounds.
212 * The caller may pre-populate *bpp if it already has the requested buffer
213 * in-hand, else must set *bpp to NULL. Note that the cluster_read() inline
214 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
216 * filesize - read-ahead @ blksize will not cross this boundary
217 * loffset - loffset for returned *bpp
218 * blksize - blocksize for returned *bpp and read-ahead bps
219 * minreq - minimum (not a hard minimum) in bytes, typically reflects
220 * a higher level uio resid.
221 * maxreq - maximum (sequential heuristic) in bytes (highet typ ~2MB)
222 * bpp - return buffer (*bpp) for (loffset,blksize)
225 cluster_readx(struct vnode
*vp
, off_t filesize
, off_t loffset
, int blksize
,
226 int bflags
, size_t minreq
, size_t maxreq
,
229 struct buf
*bp
, *rbp
, *reqbp
;
237 int blkflags
= (bflags
& B_KVABIO
) ? GETBLK_KVABIO
: 0;
242 * Calculate the desired read-ahead in blksize'd blocks (maxra).
243 * To do this we calculate maxreq.
245 * maxreq typically starts out as a sequential heuristic. If the
246 * high level uio/resid is bigger (minreq), we pop maxreq up to
247 * minreq. This represents the case where random I/O is being
248 * performed by the userland is issuing big read()'s.
250 * Then we limit maxreq to max_readahead to ensure it is a reasonable
253 * Finally we must ensure that (loffset + maxreq) does not cross the
254 * boundary (filesize) for the current blocksize. If we allowed it
255 * to cross we could end up with buffers past the boundary with the
256 * wrong block size (HAMMER large-data areas use mixed block sizes).
257 * minreq is also absolutely limited to filesize.
261 /* minreq not used beyond this point */
263 if (maxreq
> max_readahead
) {
264 maxreq
= max_readahead
;
265 if (maxreq
> 16 * 1024 * 1024)
266 maxreq
= 16 * 1024 * 1024;
268 if (maxreq
< blksize
)
270 if (loffset
+ maxreq
> filesize
) {
271 if (loffset
> filesize
)
274 maxreq
= filesize
- loffset
;
277 maxra
= (int)(maxreq
/ blksize
);
280 * Get the requested block.
285 *bpp
= reqbp
= bp
= getblk(vp
, loffset
, blksize
, blkflags
, 0);
286 origoffset
= loffset
;
289 * Calculate the maximum cluster size for a single I/O, used
290 * by cluster_rbuild().
292 maxrbuild
= vmaxiosize(vp
) / blksize
;
295 * If it is in the cache, then check to see if the reads have been
296 * sequential. If they have, then try some read-ahead, otherwise
297 * back-off on prospective read-aheads.
299 if (bp
->b_flags
& B_CACHE
) {
301 * Not sequential, do not do any read-ahead
307 * No read-ahead mark, do not do any read-ahead
310 if ((bp
->b_flags
& B_RAM
) == 0)
314 * We hit a read-ahead-mark, figure out how much read-ahead
315 * to do (maxra) and where to start (loffset).
317 * Typically the way this works is that B_RAM is set in the
318 * middle of the cluster and triggers an overlapping
319 * read-ahead of 1/2 a cluster more blocks. This ensures
320 * that the cluster read-ahead scales with the read-ahead
321 * count and is thus better-able to absorb the caller's
324 * Estimate where the next unread block will be by assuming
325 * that the B_RAM's are placed at the half-way point.
327 bp
->b_flags
&= ~B_RAM
;
330 rbp
= findblk(vp
, loffset
+ i
* blksize
, FINDBLK_TEST
);
331 if (rbp
== NULL
|| (rbp
->b_flags
& B_CACHE
) == 0) {
334 rbp
= findblk(vp
, loffset
+ i
* blksize
,
343 rbp
= findblk(vp
, loffset
+ i
* blksize
,
352 * We got everything or everything is in the cache, no
359 * Calculate where to start the read-ahead and how much
360 * to do. Generally speaking we want to read-ahead by
361 * (maxra) when we've found a read-ahead mark. We do
362 * not want to reduce maxra here as it will cause
363 * successive read-ahead I/O's to be smaller and smaller.
365 * However, we have to make sure we don't break the
366 * filesize limitation for the clustered operation.
368 loffset
+= i
* blksize
;
371 if (loffset
>= filesize
)
373 if (loffset
+ maxra
* blksize
> filesize
) {
374 maxreq
= filesize
- loffset
;
375 maxra
= (int)(maxreq
/ blksize
);
379 * Set RAM on first read-ahead block since we still have
380 * approximate maxra/2 blocks ahead of us that are already
381 * cached or in-progress.
386 * Start block is not valid, we will want to do a
389 __debugvar off_t firstread
= bp
->b_loffset
;
393 * Set-up synchronous read for bp.
395 bp
->b_cmd
= BUF_CMD_READ
;
396 bp
->b_bio1
.bio_done
= biodone_sync
;
397 bp
->b_bio1
.bio_flags
|= BIO_SYNC
;
399 KASSERT(firstread
!= NOOFFSET
,
400 ("cluster_read: no buffer offset"));
402 nblks
= calc_rbuild_reqsize(maxra
, maxrbuild
);
405 * Set RAM half-way through the full-cluster.
407 sr
= (maxra
+ 1) / 2;
412 error
= VOP_BMAP(vp
, loffset
, &doffset
,
413 &burstbytes
, NULL
, BUF_CMD_READ
);
415 goto single_block_read
;
416 if (nblks
> burstbytes
/ blksize
)
417 nblks
= burstbytes
/ blksize
;
418 if (doffset
== NOOFFSET
)
419 goto single_block_read
;
421 goto single_block_read
;
423 bp
= cluster_rbuild(vp
, filesize
, loffset
,
424 doffset
, blksize
, nblks
, bp
, &sr
);
425 loffset
+= bp
->b_bufsize
;
426 maxra
-= bp
->b_bufsize
/ blksize
;
430 * If it isn't in the cache, then get a chunk from
431 * disk if sequential, otherwise just get the block.
439 * If B_CACHE was not set issue bp. bp will either be an
440 * asynchronous cluster buf or a synchronous single-buf.
441 * If it is a single buf it will be the same as reqbp.
443 * NOTE: Once an async cluster buf is issued bp becomes invalid.
446 #if defined(CLUSTERDEBUG)
448 kprintf("S(%012jx,%d,%d)\n",
449 (intmax_t)bp
->b_loffset
, bp
->b_bcount
, maxra
);
451 if ((bp
->b_flags
& B_CLUSTER
) == 0)
452 vfs_busy_pages(vp
, bp
);
453 bp
->b_flags
&= ~(B_ERROR
| B_INVAL
| B_NOTMETA
);
454 bp
->b_flags
|= bflags
;
455 vn_strategy(vp
, &bp
->b_bio1
);
460 #if defined(CLUSTERDEBUG)
462 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
463 loffset
, blksize
, maxra
, sr
);
467 * If we have been doing sequential I/O, then do some read-ahead.
468 * The code above us should have positioned us at the next likely
471 * Only mess with buffers which we can immediately lock. HAMMER
472 * will do device-readahead irrespective of what the blocks
475 * Set B_RAM on the first buffer (the next likely offset needing
476 * read-ahead), under the assumption that there are still
477 * approximately maxra/2 blocks good ahead of us.
483 rbp
= getblk(vp
, loffset
, blksize
,
484 GETBLK_SZMATCH
| GETBLK_NOWAIT
| GETBLK_KVABIO
,
486 #if defined(CLUSTERDEBUG)
488 kprintf("read-ahead %016jx rbp=%p ",
494 if ((rbp
->b_flags
& B_CACHE
)) {
500 * If BMAP is not supported or has an issue, we still do
501 * (maxra) read-ahead, but we do not try to use rbuild.
503 error
= VOP_BMAP(vp
, loffset
, &doffset
,
504 &burstbytes
, NULL
, BUF_CMD_READ
);
505 if (error
|| doffset
== NOOFFSET
) {
509 nblks
= calc_rbuild_reqsize(maxra
, maxrbuild
);
510 if (nblks
> burstbytes
/ blksize
)
511 nblks
= burstbytes
/ blksize
;
513 rbp
->b_cmd
= BUF_CMD_READ
;
516 rbp
= cluster_rbuild(vp
, filesize
, loffset
,
520 rbp
->b_bio2
.bio_offset
= doffset
;
525 rbp
->b_flags
&= ~(B_ERROR
| B_INVAL
| B_NOTMETA
);
526 rbp
->b_flags
|= bflags
;
528 if ((rbp
->b_flags
& B_CLUSTER
) == 0)
529 vfs_busy_pages(vp
, rbp
);
531 loffset
+= rbp
->b_bufsize
;
532 maxra
-= rbp
->b_bufsize
/ blksize
;
533 vn_strategy(vp
, &rbp
->b_bio1
);
534 /* rbp invalid now */
538 * Wait for our original buffer to complete its I/O. reqbp will
539 * be NULL if the original buffer was B_CACHE. We are returning
540 * (*bpp) which is the same as reqbp when reqbp != NULL.
544 KKASSERT(reqbp
->b_bio1
.bio_flags
& BIO_SYNC
);
545 error
= biowait(&reqbp
->b_bio1
, "clurd");
553 * This replaces breadcb(), providing an asynchronous read of the requested
554 * buffer with a callback, plus an asynchronous read-ahead within the
557 * The callback must check whether BIO_DONE is set in the bio and issue
558 * the bpdone(bp, 0) if it isn't. The callback is responsible for clearing
559 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
561 * filesize - read-ahead @ blksize will not cross this boundary
562 * loffset - loffset for returned *bpp
563 * blksize - blocksize for returned *bpp and read-ahead bps
564 * minreq - minimum (not a hard minimum) in bytes, typically reflects
565 * a higher level uio resid.
566 * maxreq - maximum (sequential heuristic) in bytes (highet typ ~2MB)
567 * bpp - return buffer (*bpp) for (loffset,blksize)
570 cluster_readcb(struct vnode
*vp
, off_t filesize
, off_t loffset
, int blksize
,
571 int bflags
, size_t minreq
, size_t maxreq
,
572 void (*func
)(struct bio
*), void *arg
)
574 struct buf
*bp
, *rbp
, *reqbp
;
581 int blkflags
= (bflags
& B_KVABIO
) ? GETBLK_KVABIO
: 0;
586 * Calculate the desired read-ahead in blksize'd blocks (maxra).
587 * To do this we calculate maxreq.
589 * maxreq typically starts out as a sequential heuristic. If the
590 * high level uio/resid is bigger (minreq), we pop maxreq up to
591 * minreq. This represents the case where random I/O is being
592 * performed by the userland is issuing big read()'s.
594 * Then we limit maxreq to max_readahead to ensure it is a reasonable
597 * Finally we must ensure that (loffset + maxreq) does not cross the
598 * boundary (filesize) for the current blocksize. If we allowed it
599 * to cross we could end up with buffers past the boundary with the
600 * wrong block size (HAMMER large-data areas use mixed block sizes).
601 * minreq is also absolutely limited to filesize.
605 /* minreq not used beyond this point */
607 if (maxreq
> max_readahead
) {
608 maxreq
= max_readahead
;
609 if (maxreq
> 16 * 1024 * 1024)
610 maxreq
= 16 * 1024 * 1024;
612 if (maxreq
< blksize
)
614 if (loffset
+ maxreq
> filesize
) {
615 if (loffset
> filesize
)
618 maxreq
= filesize
- loffset
;
621 maxra
= (int)(maxreq
/ blksize
);
624 * Get the requested block.
626 reqbp
= bp
= getblk(vp
, loffset
, blksize
, blkflags
, 0);
627 origoffset
= loffset
;
630 * Calculate the maximum cluster size for a single I/O, used
631 * by cluster_rbuild().
633 maxrbuild
= vmaxiosize(vp
) / blksize
;
636 * if it is in the cache, then check to see if the reads have been
637 * sequential. If they have, then try some read-ahead, otherwise
638 * back-off on prospective read-aheads.
640 if (bp
->b_flags
& B_CACHE
) {
642 * Setup for func() call whether we do read-ahead or not.
644 bp
->b_bio1
.bio_caller_info1
.ptr
= arg
;
645 bp
->b_bio1
.bio_flags
|= BIO_DONE
;
648 * Not sequential, do not do any read-ahead
654 * No read-ahead mark, do not do any read-ahead
657 if ((bp
->b_flags
& B_RAM
) == 0)
659 bp
->b_flags
&= ~B_RAM
;
662 * We hit a read-ahead-mark, figure out how much read-ahead
663 * to do (maxra) and where to start (loffset).
665 * Shortcut the scan. Typically the way this works is that
666 * we've built up all the blocks inbetween except for the
667 * last in previous iterations, so if the second-to-last
668 * block is present we just skip ahead to it.
670 * This algorithm has O(1) cpu in the steady state no
671 * matter how large maxra is.
673 if (findblk(vp
, loffset
+ (maxra
- 2) * blksize
, FINDBLK_TEST
))
678 if (findblk(vp
, loffset
+ i
* blksize
,
679 FINDBLK_TEST
) == NULL
) {
686 * We got everything or everything is in the cache, no
693 * Calculate where to start the read-ahead and how much
694 * to do. Generally speaking we want to read-ahead by
695 * (maxra) when we've found a read-ahead mark. We do
696 * not want to reduce maxra here as it will cause
697 * successive read-ahead I/O's to be smaller and smaller.
699 * However, we have to make sure we don't break the
700 * filesize limitation for the clustered operation.
702 loffset
+= i
* blksize
;
704 /* leave reqbp intact to force function callback */
706 if (loffset
>= filesize
)
708 if (loffset
+ maxra
* blksize
> filesize
) {
709 maxreq
= filesize
- loffset
;
710 maxra
= (int)(maxreq
/ blksize
);
715 * bp is not valid, no prior cluster in progress so get a
716 * full cluster read-ahead going.
718 __debugvar off_t firstread
= bp
->b_loffset
;
723 * Set-up synchronous read for bp.
725 bp
->b_flags
&= ~(B_ERROR
| B_EINTR
| B_INVAL
| B_NOTMETA
);
726 bp
->b_flags
|= bflags
;
727 bp
->b_cmd
= BUF_CMD_READ
;
728 bp
->b_bio1
.bio_done
= func
;
729 bp
->b_bio1
.bio_caller_info1
.ptr
= arg
;
731 reqbp
= NULL
; /* don't func() reqbp, it's running async */
733 KASSERT(firstread
!= NOOFFSET
,
734 ("cluster_read: no buffer offset"));
737 * nblks is our cluster_rbuild request size, limited
738 * primarily by the device.
740 nblks
= calc_rbuild_reqsize(maxra
, maxrbuild
);
743 * Set RAM half-way through the full-cluster.
745 sr
= (maxra
+ 1) / 2;
750 error
= VOP_BMAP(vp
, loffset
, &doffset
,
751 &burstbytes
, NULL
, BUF_CMD_READ
);
753 goto single_block_read
;
754 if (nblks
> burstbytes
/ blksize
)
755 nblks
= burstbytes
/ blksize
;
756 if (doffset
== NOOFFSET
)
757 goto single_block_read
;
759 goto single_block_read
;
761 bp
= cluster_rbuild(vp
, filesize
, loffset
,
762 doffset
, blksize
, nblks
, bp
, &sr
);
763 loffset
+= bp
->b_bufsize
;
764 maxra
-= bp
->b_bufsize
/ blksize
;
768 * If it isn't in the cache, then get a chunk from
769 * disk if sequential, otherwise just get the block.
777 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
778 * bp will either be an asynchronous cluster buf or an asynchronous
781 * NOTE: Once an async cluster buf is issued bp becomes invalid.
784 #if defined(CLUSTERDEBUG)
786 kprintf("S(%012jx,%d,%d)\n",
787 (intmax_t)bp
->b_loffset
, bp
->b_bcount
, maxra
);
789 if ((bp
->b_flags
& B_CLUSTER
) == 0)
790 vfs_busy_pages(vp
, bp
);
791 bp
->b_flags
&= ~(B_ERROR
| B_INVAL
| B_NOTMETA
);
792 bp
->b_flags
|= bflags
;
793 vn_strategy(vp
, &bp
->b_bio1
);
798 #if defined(CLUSTERDEBUG)
800 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
801 loffset
, blksize
, maxra
, sr
);
805 * If we have been doing sequential I/O, then do some read-ahead.
806 * The code above us should have positioned us at the next likely
809 * Only mess with buffers which we can immediately lock. HAMMER
810 * will do device-readahead irrespective of what the blocks
818 rbp
= getblk(vp
, loffset
, blksize
,
819 GETBLK_SZMATCH
| GETBLK_NOWAIT
| GETBLK_KVABIO
,
823 if ((rbp
->b_flags
& B_CACHE
)) {
829 * If BMAP is not supported or has an issue, we still do
830 * (maxra) read-ahead, but we do not try to use rbuild.
832 error
= VOP_BMAP(vp
, loffset
, &doffset
,
833 &burstbytes
, NULL
, BUF_CMD_READ
);
834 if (error
|| doffset
== NOOFFSET
) {
838 nblks
= calc_rbuild_reqsize(maxra
, maxrbuild
);
839 if (nblks
> burstbytes
/ blksize
)
840 nblks
= burstbytes
/ blksize
;
842 rbp
->b_cmd
= BUF_CMD_READ
;
845 rbp
= cluster_rbuild(vp
, filesize
, loffset
,
849 rbp
->b_bio2
.bio_offset
= doffset
;
854 rbp
->b_flags
&= ~(B_ERROR
| B_INVAL
| B_NOTMETA
);
855 rbp
->b_flags
|= bflags
;
857 if ((rbp
->b_flags
& B_CLUSTER
) == 0)
858 vfs_busy_pages(vp
, rbp
);
860 loffset
+= rbp
->b_bufsize
;
861 maxra
-= rbp
->b_bufsize
/ blksize
;
862 vn_strategy(vp
, &rbp
->b_bio1
);
863 /* rbp invalid now */
867 * If reqbp is non-NULL it had B_CACHE set and we issue the
868 * function callback synchronously.
870 * Note that we may start additional asynchronous I/O before doing
871 * the func() callback for the B_CACHE case
875 func(&reqbp
->b_bio1
);
879 * If blocks are contiguous on disk, use this to provide clustered
880 * read ahead. We will read as many blocks as possible sequentially
881 * and then parcel them up into logical blocks in the buffer hash table.
883 * This function either returns a cluster buf or it returns fbp. fbp is
884 * already expected to be set up as a synchronous or asynchronous request.
886 * If a cluster buf is returned it will always be async.
888 * (*srp) counts down original blocks to determine where B_RAM should be set.
889 * Set B_RAM when *srp drops to 0. If (*srp) starts at 0, B_RAM will not be
890 * set on any buffer. Make sure B_RAM is cleared on any other buffers to
891 * prevent degenerate read-aheads from being generated.
894 cluster_rbuild(struct vnode
*vp
, off_t filesize
, off_t loffset
, off_t doffset
,
895 int blksize
, int run
, struct buf
*fbp
, int *srp
)
897 struct buf
*bp
, *tbp
;
900 int maxiosize
= vmaxiosize(vp
);
905 while (loffset
+ run
* blksize
> filesize
) {
910 tbp
->b_bio2
.bio_offset
= doffset
;
911 if (((tbp
->b_flags
& B_VMIO
) == 0) || (run
<= 1)) {
920 * Get a pbuf, limit cluster I/O on a per-device basis. If
921 * doing cluster I/O for a file, limit cluster I/O on a
924 if (vp
->v_type
== VCHR
|| vp
->v_type
== VBLK
)
925 bp
= trypbuf_kva(&vp
->v_pbuf_count
);
927 bp
= trypbuf_kva(&vp
->v_mount
->mnt_pbuf_count
);
933 * We are synthesizing a buffer out of vm_page_t's, but
934 * if the block size is not page aligned then the starting
935 * address may not be either. Inherit the b_data offset
936 * from the original buffer.
939 bp
->b_data
= (char *)((vm_offset_t
)bp
->b_data
|
940 ((vm_offset_t
)tbp
->b_data
& PAGE_MASK
));
941 bp
->b_flags
|= B_CLUSTER
| B_VMIO
| B_KVABIO
;
942 bp
->b_cmd
= BUF_CMD_READ
;
943 bp
->b_bio1
.bio_done
= cluster_callback
; /* default to async */
944 bp
->b_bio1
.bio_caller_info1
.cluster_head
= NULL
;
945 bp
->b_bio1
.bio_caller_info2
.cluster_tail
= NULL
;
946 bp
->b_loffset
= loffset
;
947 bp
->b_bio2
.bio_offset
= doffset
;
948 KASSERT(bp
->b_loffset
!= NOOFFSET
,
949 ("cluster_rbuild: no buffer offset"));
953 bp
->b_xio
.xio_npages
= 0;
955 for (boffset
= doffset
, i
= 0; i
< run
; ++i
, boffset
+= blksize
) {
957 if ((bp
->b_xio
.xio_npages
* PAGE_SIZE
) +
958 round_page(blksize
) > maxiosize
) {
963 * Shortcut some checks and try to avoid buffers that
964 * would block in the lock. The same checks have to
965 * be made again after we officially get the buffer.
967 tbp
= getblk(vp
, loffset
+ i
* blksize
, blksize
,
974 for (j
= 0; j
< tbp
->b_xio
.xio_npages
; j
++) {
975 if (tbp
->b_xio
.xio_pages
[j
]->valid
)
978 if (j
!= tbp
->b_xio
.xio_npages
) {
984 * Stop scanning if the buffer is fuly valid
985 * (marked B_CACHE), or locked (may be doing a
986 * background write), or if the buffer is not
987 * VMIO backed. The clustering code can only deal
988 * with VMIO-backed buffers.
990 if ((tbp
->b_flags
& (B_CACHE
|B_LOCKED
)) ||
991 (tbp
->b_flags
& B_VMIO
) == 0 ||
992 (LIST_FIRST(&tbp
->b_dep
) != NULL
&&
1000 * The buffer must be completely invalid in order to
1001 * take part in the cluster. If it is partially valid
1004 for (j
= 0;j
< tbp
->b_xio
.xio_npages
; j
++) {
1005 if (tbp
->b_xio
.xio_pages
[j
]->valid
)
1008 if (j
!= tbp
->b_xio
.xio_npages
) {
1014 * Depress the priority of buffers not explicitly
1017 /* tbp->b_flags |= B_AGE; */
1020 * Set the block number if it isn't set, otherwise
1021 * if it is make sure it matches the block number we
1024 if (tbp
->b_bio2
.bio_offset
== NOOFFSET
) {
1025 tbp
->b_bio2
.bio_offset
= boffset
;
1026 } else if (tbp
->b_bio2
.bio_offset
!= boffset
) {
1033 * Set B_RAM if (*srp) is 1. B_RAM is only set on one buffer
1034 * in the cluster, including potentially the first buffer
1035 * once we start streaming the read-aheads.
1038 cluster_setram(tbp
);
1040 cluster_clrram(tbp
);
1043 * The passed-in tbp (i == 0) will already be set up for
1044 * async or sync operation. All other tbp's acquire in
1045 * our loop are set up for async operation.
1047 tbp
->b_cmd
= BUF_CMD_READ
;
1049 cluster_append(&bp
->b_bio1
, tbp
);
1050 for (j
= 0; j
< tbp
->b_xio
.xio_npages
; ++j
) {
1053 m
= tbp
->b_xio
.xio_pages
[j
];
1054 vm_page_busy_wait(m
, FALSE
, "clurpg");
1055 vm_page_io_start(m
);
1057 vm_object_pip_add(m
->object
, 1);
1058 if ((bp
->b_xio
.xio_npages
== 0) ||
1059 (bp
->b_xio
.xio_pages
[bp
->b_xio
.xio_npages
-1] != m
)) {
1060 bp
->b_xio
.xio_pages
[bp
->b_xio
.xio_npages
] = m
;
1061 bp
->b_xio
.xio_npages
++;
1063 if ((m
->valid
& VM_PAGE_BITS_ALL
) == VM_PAGE_BITS_ALL
) {
1064 tbp
->b_xio
.xio_pages
[j
] = bogus_page
;
1065 tbp
->b_flags
|= B_HASBOGUS
;
1069 * XXX shouldn't this be += size for both, like in
1072 * Don't inherit tbp->b_bufsize as it may be larger due to
1073 * a non-page-aligned size. Instead just aggregate using
1076 if (tbp
->b_bcount
!= blksize
)
1077 kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp
->b_bcount
, blksize
);
1078 if (tbp
->b_bufsize
!= blksize
)
1079 kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp
->b_bufsize
, blksize
);
1080 bp
->b_bcount
+= blksize
;
1081 bp
->b_bufsize
+= blksize
;
1085 * Fully valid pages in the cluster are already good and do not need
1086 * to be re-read from disk. Replace the page with bogus_page
1088 for (j
= 0; j
< bp
->b_xio
.xio_npages
; j
++) {
1089 if ((bp
->b_xio
.xio_pages
[j
]->valid
& VM_PAGE_BITS_ALL
) ==
1091 bp
->b_xio
.xio_pages
[j
] = bogus_page
;
1092 bp
->b_flags
|= B_HASBOGUS
;
1095 if (bp
->b_bufsize
> bp
->b_kvasize
) {
1096 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
1097 bp
->b_bufsize
, bp
->b_kvasize
);
1099 pmap_qenter_noinval(trunc_page((vm_offset_t
)bp
->b_data
),
1100 (vm_page_t
*)bp
->b_xio
.xio_pages
,
1101 bp
->b_xio
.xio_npages
);
1107 * Cleanup after a clustered read or write.
1108 * This is complicated by the fact that any of the buffers might have
1109 * extra memory (if there were no empty buffer headers at allocbuf time)
1110 * that we will need to shift around.
1112 * The returned bio is &bp->b_bio1
1115 cluster_callback(struct bio
*bio
)
1117 struct buf
*bp
= bio
->bio_buf
;
1125 * Must propogate errors to all the components. A short read (EOF)
1126 * is a critical error.
1128 if (bp
->b_flags
& B_ERROR
) {
1129 error
= bp
->b_error
;
1130 } else if (bp
->b_bcount
!= bp
->b_bufsize
) {
1131 panic("cluster_callback: unexpected EOF on cluster %p!", bio
);
1134 pmap_qremove_noinval(trunc_page((vm_offset_t
) bp
->b_data
),
1135 bp
->b_xio
.xio_npages
);
1138 * Retrieve the cluster head and dispose of the cluster buffer.
1139 * the vp is only valid while we hold one or more cluster elements,
1140 * so we have to do this before disposing of them.
1142 tbp
= bio
->bio_caller_info1
.cluster_head
;
1143 bio
->bio_caller_info1
.cluster_head
= NULL
;
1144 bpflags
= bp
->b_flags
;
1148 if (vp
->v_type
== VCHR
|| vp
->v_type
== VBLK
)
1149 relpbuf(bp
, &vp
->v_pbuf_count
);
1151 relpbuf(bp
, &vp
->v_mount
->mnt_pbuf_count
);
1152 bp
= NULL
; /* SAFETY */
1155 * Move memory from the large cluster buffer into the component
1156 * buffers and mark IO as done on these. Since the memory map
1157 * is the same, no actual copying is required.
1159 * (And we already disposed of the larger cluster buffer)
1162 next
= tbp
->b_cluster_next
;
1164 tbp
->b_flags
|= B_ERROR
| B_IOISSUED
;
1165 tbp
->b_error
= error
;
1167 tbp
->b_dirtyoff
= tbp
->b_dirtyend
= 0;
1168 tbp
->b_flags
&= ~(B_ERROR
| B_INVAL
);
1169 if (tbp
->b_cmd
== BUF_CMD_READ
) {
1170 tbp
->b_flags
= (tbp
->b_flags
& ~B_NOTMETA
) |
1171 (bpflags
& B_NOTMETA
);
1173 tbp
->b_flags
|= B_IOISSUED
;
1176 * XXX the bdwrite()/bqrelse() issued during
1177 * cluster building clears B_RELBUF (see bqrelse()
1178 * comment). If direct I/O was specified, we have
1179 * to restore it here to allow the buffer and VM
1182 if (tbp
->b_flags
& B_DIRECT
)
1183 tbp
->b_flags
|= B_RELBUF
;
1186 * XXX I think biodone() below will do this, but do
1187 * it here anyway for consistency.
1189 if (tbp
->b_cmd
== BUF_CMD_WRITE
)
1192 biodone(&tbp
->b_bio1
);
1198 * Implement modified write build for cluster.
1200 * write_behind = 0 write behind disabled
1201 * write_behind = 1 write behind normal (default)
1202 * write_behind = 2 write behind backed-off
1204 * In addition, write_behind is only activated for files that have
1205 * grown past a certain size (default 10MB). Otherwise temporary files
1206 * wind up generating a lot of unnecessary disk I/O.
1209 cluster_wbuild_wb(struct vnode
*vp
, int blksize
, off_t start_loffset
, int len
)
1213 switch(write_behind
) {
1215 if (start_loffset
< len
)
1217 start_loffset
-= len
;
1220 if (vp
->v_filesize
>= write_behind_minfilesize
) {
1221 r
= cluster_wbuild(vp
, NULL
, blksize
,
1222 start_loffset
, len
);
1233 * Do clustered write for FFS.
1236 * 1. Write is not sequential (write asynchronously)
1237 * Write is sequential:
1238 * 2. beginning of cluster - begin cluster
1239 * 3. middle of a cluster - add to cluster
1240 * 4. end of a cluster - asynchronously write cluster
1242 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
1245 cluster_write(struct buf
*bp
, off_t filesize
, int blksize
, int seqcount
)
1249 int maxclen
, cursize
;
1251 cluster_cache_t dummy
;
1252 cluster_cache_t
*cc
;
1255 if (vp
->v_type
== VREG
)
1256 async
= vp
->v_mount
->mnt_flag
& MNT_ASYNC
;
1259 loffset
= bp
->b_loffset
;
1260 KASSERT(bp
->b_loffset
!= NOOFFSET
,
1261 ("cluster_write: no buffer offset"));
1263 cc
= cluster_getcache(&dummy
, vp
, loffset
);
1266 * Initialize vnode to beginning of file.
1269 cc
->v_lasta
= cc
->v_clen
= cc
->v_cstart
= cc
->v_lastw
= 0;
1271 if (cc
->v_clen
== 0 || loffset
!= cc
->v_lastw
||
1272 (bp
->b_bio2
.bio_offset
!= NOOFFSET
&&
1273 (bp
->b_bio2
.bio_offset
!= cc
->v_lasta
))) {
1275 * Next block is not logically sequential, or, if physical
1276 * block offsets are available, not physically sequential.
1278 * If physical block offsets are not available we only
1279 * get here if we weren't logically sequential.
1281 maxclen
= vmaxiosize(vp
);
1282 if (cc
->v_clen
!= 0) {
1284 * Next block is not sequential.
1286 * If we are not writing at end of file, the process
1287 * seeked to another point in the file since its last
1288 * write, or we have reached our maximum cluster size,
1289 * then push the previous cluster. Otherwise try
1290 * reallocating to make it sequential.
1292 * Change to algorithm: only push previous cluster if
1293 * it was sequential from the point of view of the
1294 * seqcount heuristic, otherwise leave the buffer
1295 * intact so we can potentially optimize the I/O
1296 * later on in the buf_daemon or update daemon
1299 cursize
= cc
->v_lastw
- cc
->v_cstart
;
1300 if (bp
->b_loffset
+ blksize
< filesize
||
1301 loffset
!= cc
->v_lastw
||
1302 cc
->v_clen
<= cursize
) {
1303 if (!async
&& seqcount
> 0) {
1304 cluster_wbuild_wb(vp
, blksize
,
1305 cc
->v_cstart
, cursize
);
1308 struct buf
**bpp
, **endbp
;
1309 struct cluster_save
*buflist
;
1311 buflist
= cluster_collectbufs(cc
, vp
,
1313 endbp
= &buflist
->bs_children
1314 [buflist
->bs_nchildren
- 1];
1315 if (VOP_REALLOCBLKS(vp
, buflist
)) {
1317 * Failed, push the previous cluster
1318 * if *really* writing sequentially
1319 * in the logical file (seqcount > 1),
1320 * otherwise delay it in the hopes that
1321 * the low level disk driver can
1322 * optimize the write ordering.
1324 * NOTE: We do not brelse the last
1325 * element which is bp, and we
1326 * do not return here.
1328 for (bpp
= buflist
->bs_children
;
1331 kfree(buflist
, M_SEGMENT
);
1333 cluster_wbuild_wb(vp
,
1334 blksize
, cc
->v_cstart
,
1339 * Succeeded, keep building cluster.
1341 for (bpp
= buflist
->bs_children
;
1342 bpp
<= endbp
; bpp
++)
1344 kfree(buflist
, M_SEGMENT
);
1345 cc
->v_lastw
= loffset
+ blksize
;
1346 cc
->v_lasta
= bp
->b_bio2
.bio_offset
+
1348 cluster_putcache(cc
);
1355 * Consider beginning a cluster. If at end of file, make
1356 * cluster as large as possible, otherwise find size of
1359 if ((vp
->v_type
== VREG
) &&
1360 bp
->b_loffset
+ blksize
< filesize
&&
1361 (bp
->b_bio2
.bio_offset
== NOOFFSET
) &&
1362 (VOP_BMAP(vp
, loffset
, &bp
->b_bio2
.bio_offset
, &maxclen
, NULL
, BUF_CMD_WRITE
) ||
1363 bp
->b_bio2
.bio_offset
== NOOFFSET
)) {
1366 cc
->v_lasta
= bp
->b_bio2
.bio_offset
+ blksize
;
1367 cc
->v_cstart
= loffset
;
1368 cc
->v_lastw
= loffset
+ blksize
;
1369 cluster_putcache(cc
);
1372 if (maxclen
> blksize
)
1373 cc
->v_clen
= maxclen
;
1375 cc
->v_clen
= blksize
;
1376 if (!async
&& cc
->v_clen
== 0) { /* I/O not contiguous */
1377 cc
->v_cstart
= loffset
;
1379 } else { /* Wait for rest of cluster */
1380 cc
->v_cstart
= loffset
;
1383 } else if (loffset
== cc
->v_cstart
+ cc
->v_clen
) {
1385 * At end of cluster, write it out if seqcount tells us we
1386 * are operating sequentially, otherwise let the buf or
1387 * update daemon handle it.
1391 cluster_wbuild_wb(vp
, blksize
, cc
->v_cstart
,
1392 cc
->v_clen
+ blksize
);
1394 cc
->v_cstart
= loffset
;
1395 } else if (vm_page_count_severe() &&
1396 bp
->b_loffset
+ blksize
< filesize
) {
1398 * We are low on memory, get it going NOW. However, do not
1399 * try to push out a partial block at the end of the file
1400 * as this could lead to extremely non-optimal write activity.
1405 * In the middle of a cluster, so just delay the I/O for now.
1409 cc
->v_lastw
= loffset
+ blksize
;
1410 cc
->v_lasta
= bp
->b_bio2
.bio_offset
+ blksize
;
1411 cluster_putcache(cc
);
1415 * This is the clustered version of bawrite(). It works similarly to
1416 * cluster_write() except I/O on the buffer is guaranteed to occur.
1419 cluster_awrite(struct buf
*bp
)
1424 * Don't bother if it isn't clusterable.
1426 if ((bp
->b_flags
& B_CLUSTEROK
) == 0 ||
1428 (bp
->b_vp
->v_flag
& VOBJBUF
) == 0) {
1429 total
= bp
->b_bufsize
;
1434 total
= cluster_wbuild(bp
->b_vp
, &bp
, bp
->b_bufsize
,
1435 bp
->b_loffset
, vmaxiosize(bp
->b_vp
));
1438 * If bp is still non-NULL then cluster_wbuild() did not initiate
1439 * I/O on it and we must do so here to provide the API guarantee.
1448 * This is an awful lot like cluster_rbuild...wish they could be combined.
1449 * The last lbn argument is the current block on which I/O is being
1450 * performed. Check to see that it doesn't fall in the middle of
1451 * the current block (if last_bp == NULL).
1453 * cluster_wbuild() normally does not guarantee anything. If bpp is
1454 * non-NULL and cluster_wbuild() is able to incorporate it into the
1455 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1456 * the caller must dispose of *bpp.
1459 cluster_wbuild(struct vnode
*vp
, struct buf
**bpp
,
1460 int blksize
, off_t start_loffset
, int bytes
)
1462 struct buf
*bp
, *tbp
;
1464 int totalwritten
= 0;
1466 int maxiosize
= vmaxiosize(vp
);
1470 * If the buffer matches the passed locked & removed buffer
1471 * we used the passed buffer (which might not be B_DELWRI).
1473 * Otherwise locate the buffer and determine if it is
1476 if (bpp
&& (*bpp
)->b_loffset
== start_loffset
) {
1481 tbp
= findblk(vp
, start_loffset
, FINDBLK_NBLOCK
|
1484 (tbp
->b_flags
& (B_LOCKED
| B_INVAL
| B_DELWRI
)) !=
1486 (LIST_FIRST(&tbp
->b_dep
) && buf_checkwrite(tbp
))) {
1489 start_loffset
+= blksize
;
1495 KKASSERT(tbp
->b_cmd
== BUF_CMD_DONE
);
1498 * Extra memory in the buffer, punt on this buffer.
1499 * XXX we could handle this in most cases, but we would
1500 * have to push the extra memory down to after our max
1501 * possible cluster size and then potentially pull it back
1502 * up if the cluster was terminated prematurely--too much
1505 if ((tbp
->b_flags
& B_CLUSTEROK
) == 0 ||
1506 tbp
->b_bcount
!= tbp
->b_bufsize
||
1507 tbp
->b_bcount
!= blksize
||
1509 totalwritten
+= tbp
->b_bufsize
;
1511 start_loffset
+= blksize
;
1517 * Get a pbuf, limit cluster I/O on a per-device basis. If
1518 * doing cluster I/O for a file, limit cluster I/O on a
1521 * HAMMER and other filesystems may attempt to queue a massive
1522 * amount of write I/O, using trypbuf() here easily results in
1523 * situation where the I/O stream becomes non-clustered.
1525 if (vp
->v_type
== VCHR
|| vp
->v_type
== VBLK
)
1526 bp
= getpbuf_kva(&vp
->v_pbuf_count
);
1528 bp
= getpbuf_kva(&vp
->v_mount
->mnt_pbuf_count
);
1531 * Set up the pbuf. Track our append point with b_bcount
1532 * and b_bufsize. b_bufsize is not used by the device but
1533 * our caller uses it to loop clusters and we use it to
1534 * detect a premature EOF on the block device.
1538 bp
->b_xio
.xio_npages
= 0;
1539 bp
->b_loffset
= tbp
->b_loffset
;
1540 bp
->b_bio2
.bio_offset
= tbp
->b_bio2
.bio_offset
;
1544 * We are synthesizing a buffer out of vm_page_t's, but
1545 * if the block size is not page aligned then the starting
1546 * address may not be either. Inherit the b_data offset
1547 * from the original buffer.
1549 bp
->b_data
= (char *)((vm_offset_t
)bp
->b_data
|
1550 ((vm_offset_t
)tbp
->b_data
& PAGE_MASK
));
1551 bp
->b_flags
&= ~(B_ERROR
| B_NOTMETA
);
1552 bp
->b_flags
|= B_CLUSTER
| B_BNOCLIP
| B_KVABIO
|
1553 (tbp
->b_flags
& (B_VMIO
| B_NEEDCOMMIT
|
1555 bp
->b_bio1
.bio_caller_info1
.cluster_head
= NULL
;
1556 bp
->b_bio1
.bio_caller_info2
.cluster_tail
= NULL
;
1559 * From this location in the file, scan forward to see
1560 * if there are buffers with adjacent data that need to
1561 * be written as well.
1563 * IO *must* be initiated on index 0 at this point
1564 * (particularly when called from cluster_awrite()).
1566 for (i
= 0; i
< bytes
; (i
+= blksize
), (start_loffset
+= blksize
)) {
1574 tbp
= findblk(vp
, start_loffset
,
1575 FINDBLK_NBLOCK
| FINDBLK_KVABIO
);
1577 * Buffer not found or could not be locked
1584 * If it IS in core, but has different
1585 * characteristics, then don't cluster
1588 if ((tbp
->b_flags
& (B_VMIO
| B_CLUSTEROK
|
1589 B_INVAL
| B_DELWRI
| B_NEEDCOMMIT
))
1590 != (B_DELWRI
| B_CLUSTEROK
|
1591 (bp
->b_flags
& (B_VMIO
| B_NEEDCOMMIT
))) ||
1592 (tbp
->b_flags
& B_LOCKED
)
1599 * Check that the combined cluster
1600 * would make sense with regard to pages
1601 * and would not be too large
1603 * WARNING! buf_checkwrite() must be the last
1604 * check made. If it returns 0 then
1605 * we must initiate the I/O.
1607 if ((tbp
->b_bcount
!= blksize
) ||
1608 ((bp
->b_bio2
.bio_offset
+ i
) !=
1609 tbp
->b_bio2
.bio_offset
) ||
1610 ((tbp
->b_xio
.xio_npages
+ bp
->b_xio
.xio_npages
) >
1611 (maxiosize
/ PAGE_SIZE
)) ||
1612 (LIST_FIRST(&tbp
->b_dep
) &&
1613 buf_checkwrite(tbp
))
1618 if (LIST_FIRST(&tbp
->b_dep
))
1621 * Ok, it's passed all the tests,
1622 * so remove it from the free list
1623 * and mark it busy. We will use it.
1626 KKASSERT(tbp
->b_cmd
== BUF_CMD_DONE
);
1630 * If the IO is via the VM then we do some
1631 * special VM hackery (yuck). Since the buffer's
1632 * block size may not be page-aligned it is possible
1633 * for a page to be shared between two buffers. We
1634 * have to get rid of the duplication when building
1637 if (tbp
->b_flags
& B_VMIO
) {
1641 * Try to avoid deadlocks with the VM system.
1642 * However, we cannot abort the I/O if
1643 * must_initiate is non-zero.
1645 if (must_initiate
== 0) {
1647 j
< tbp
->b_xio
.xio_npages
;
1649 m
= tbp
->b_xio
.xio_pages
[j
];
1658 for (j
= 0; j
< tbp
->b_xio
.xio_npages
; ++j
) {
1659 m
= tbp
->b_xio
.xio_pages
[j
];
1660 vm_page_busy_wait(m
, FALSE
, "clurpg");
1661 vm_page_io_start(m
);
1663 vm_object_pip_add(m
->object
, 1);
1664 if ((bp
->b_xio
.xio_npages
== 0) ||
1665 (bp
->b_xio
.xio_pages
[bp
->b_xio
.xio_npages
- 1] != m
)) {
1666 bp
->b_xio
.xio_pages
[bp
->b_xio
.xio_npages
] = m
;
1667 bp
->b_xio
.xio_npages
++;
1671 bp
->b_bcount
+= blksize
;
1672 bp
->b_bufsize
+= blksize
;
1675 * NOTE: see bwrite/bawrite code for why we no longer
1678 * bundirty(tbp); REMOVED
1680 tbp
->b_flags
&= ~B_ERROR
;
1681 tbp
->b_cmd
= BUF_CMD_WRITE
;
1683 cluster_append(&bp
->b_bio1
, tbp
);
1686 * check for latent dependencies to be handled
1688 if (LIST_FIRST(&tbp
->b_dep
) != NULL
)
1692 pmap_qenter_noinval(trunc_page((vm_offset_t
)bp
->b_data
),
1693 (vm_page_t
*)bp
->b_xio
.xio_pages
,
1694 bp
->b_xio
.xio_npages
);
1695 if (bp
->b_bufsize
> bp
->b_kvasize
) {
1696 panic("cluster_wbuild: b_bufsize(%d) "
1697 "> b_kvasize(%d)\n",
1698 bp
->b_bufsize
, bp
->b_kvasize
);
1700 totalwritten
+= bp
->b_bufsize
;
1702 bp
->b_dirtyend
= bp
->b_bufsize
;
1703 bp
->b_bio1
.bio_done
= cluster_callback
;
1704 bp
->b_cmd
= BUF_CMD_WRITE
;
1706 vfs_busy_pages(vp
, bp
);
1707 bsetrunningbufspace(bp
, bp
->b_bufsize
);
1709 vn_strategy(vp
, &bp
->b_bio1
);
1713 return totalwritten
;
1717 * Collect together all the buffers in a cluster, plus add one
1718 * additional buffer passed-in.
1720 * Only pre-existing buffers whos block size matches blksize are collected.
1721 * (this is primarily because HAMMER1 uses varying block sizes and we don't
1722 * want to override its choices).
1724 * This code will not try to collect buffers that it cannot lock, otherwise
1725 * it might deadlock against SMP-friendly filesystems.
1727 static struct cluster_save
*
1728 cluster_collectbufs(cluster_cache_t
*cc
, struct vnode
*vp
,
1729 struct buf
*last_bp
, int blksize
)
1731 struct cluster_save
*buflist
;
1738 len
= (int)(cc
->v_lastw
- cc
->v_cstart
) / blksize
;
1740 buflist
= kmalloc(sizeof(struct buf
*) * (len
+ 1) + sizeof(*buflist
),
1741 M_SEGMENT
, M_WAITOK
);
1742 buflist
->bs_nchildren
= 0;
1743 buflist
->bs_children
= (struct buf
**) (buflist
+ 1);
1744 for (loffset
= cc
->v_cstart
, i
= 0, j
= 0;
1746 (loffset
+= blksize
), i
++) {
1747 bp
= getcacheblk(vp
, loffset
,
1748 last_bp
->b_bcount
, GETBLK_SZMATCH
|
1750 buflist
->bs_children
[i
] = bp
;
1753 } else if (bp
->b_bio2
.bio_offset
== NOOFFSET
) {
1754 VOP_BMAP(bp
->b_vp
, bp
->b_loffset
,
1755 &bp
->b_bio2
.bio_offset
,
1756 NULL
, NULL
, BUF_CMD_WRITE
);
1763 for (k
= 0; k
< j
; ++k
) {
1764 if (buflist
->bs_children
[k
]) {
1765 bqrelse(buflist
->bs_children
[k
]);
1766 buflist
->bs_children
[k
] = NULL
;
1771 bcopy(buflist
->bs_children
+ j
,
1772 buflist
->bs_children
+ 0,
1773 sizeof(buflist
->bs_children
[0]) * (i
- j
));
1777 buflist
->bs_children
[i
] = bp
= last_bp
;
1778 if (bp
->b_bio2
.bio_offset
== NOOFFSET
) {
1779 VOP_BMAP(bp
->b_vp
, bp
->b_loffset
, &bp
->b_bio2
.bio_offset
,
1780 NULL
, NULL
, BUF_CMD_WRITE
);
1782 buflist
->bs_nchildren
= i
+ 1;
1787 cluster_append(struct bio
*bio
, struct buf
*tbp
)
1789 tbp
->b_cluster_next
= NULL
;
1790 if (bio
->bio_caller_info1
.cluster_head
== NULL
) {
1791 bio
->bio_caller_info1
.cluster_head
= tbp
;
1792 bio
->bio_caller_info2
.cluster_tail
= tbp
;
1794 bio
->bio_caller_info2
.cluster_tail
->b_cluster_next
= tbp
;
1795 bio
->bio_caller_info2
.cluster_tail
= tbp
;
1801 cluster_setram(struct buf
*bp
)
1803 bp
->b_flags
|= B_RAM
;
1804 if (bp
->b_xio
.xio_npages
)
1805 vm_page_flag_set(bp
->b_xio
.xio_pages
[0], PG_RAM
);
1810 cluster_clrram(struct buf
*bp
)
1812 bp
->b_flags
&= ~B_RAM
;
1813 if (bp
->b_xio
.xio_npages
)
1814 vm_page_flag_clear(bp
->b_xio
.xio_pages
[0], PG_RAM
);