 * The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 * 	Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <vm/vm_page2.h>

#include <machine/limits.h>
/*
 * Cluster tracking cache - replaces the original vnode v_* fields which had
 * limited utility and were not MP safe.
 *
 * The cluster tracking cache is a simple 4-way set-associative non-chained
 * cache.  It is capable of tracking up to four zones separated by 1MB or
 * more.
 *
 * NOTE: We want this structure to be cache-line friendly so the iterator
 *	 is embedded rather than in a separate array.
 *
 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
 *	 For now we treat the values as heuristic but also self-consistent,
 *	 i.e. the values cannot be completely random and cannot be SMP unsafe
 *	 or the cluster code might end up clustering non-contiguous buffers
 *	 at the wrong offsets.
 */
struct cluster_cache {
	struct vnode *vp;
	u_int	locked;
	off_t	v_lastw;		/* last write (write cluster) */
	off_t	v_cstart;		/* start block of cluster */
	off_t	v_lasta;		/* last allocation */
	u_int	v_clen;			/* length of current cluster */
	u_int	iterator;
};

typedef struct cluster_cache cluster_cache_t;

#define CLUSTER_CACHE_SIZE	512
#define CLUSTER_CACHE_MASK	(CLUSTER_CACHE_SIZE - 1)

#define CLUSTER_ZONE		((off_t)(1024 * 1024))

cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
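
/*
 * Editor's illustration (not part of the original file): with
 * CLUSTER_CACHE_SIZE == 512, the array above forms 128 sets of 4 entries.
 * A vnode hashes to one set, and each of the 4 entries in the set can
 * track one CLUSTER_ZONE (1MB) region, so a single vnode can have up to
 * four independent sequential streams cached.  The mapping mirrors
 * cluster_getcache() below:
 *
 *	hv  = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
 *	hv &= CLUSTER_CACHE_MASK & ~3;		set index, multiple of 4
 *	cc  = &cluster_array[hv];		cc[0..3] are the candidates
 *	same-zone test: ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0
 */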
#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif
static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
			struct buf *last_bp, int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			off_t doffset, int blksize, int run,
			struct buf *fbp, int *srp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static void cluster_clrram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
			off_t start_loffset, int bytes);
static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
    &write_behind_minfilesize, 0, "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;
/*
 * nblks is our cluster_rbuild request size.  The approximate number of
 * physical read-ahead requests is maxra / nblks.  The physical request
 * size is limited by the device (maxrbuild).  We also do not want to make
 * the request size too big or it will mess up the B_RAM streaming.
 */
static __inline int
calc_rbuild_reqsize(int maxra, int maxrbuild)
{
	int nblks;

	if ((nblks = maxra / 4) > maxrbuild)
		nblks = maxrbuild;

	return nblks;
}
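
/*
 * Worked example (editor's illustration, values assumed): with a 2MB
 * read-ahead budget of 64KB blocks, maxra = 32 and maxra / 4 = 8 blocks
 * per physical request, clamped to maxrbuild if the device maxiosize is
 * smaller.  Per the comment above, roughly maxra / nblks = 4 physical
 * read-ahead I/Os are then kept in flight.
 */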
/*
 * Acquire/release cluster cache (can return dummy entry)
 */
static cluster_cache_t *
cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
{
	hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
	hv &= CLUSTER_CACHE_MASK & ~3;
	cc = &cluster_array[hv];
	for (i = 0; i < 4; ++i) {
		if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {

	if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
		if (cc[xact].vp == vp &&
		    ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {

		atomic_swap_int(&cc[xact].locked, 0);
	/*
	 * New entry.  If we can't acquire the cache line then use the
	 * passed-in dummy element and reset all fields.
	 *
	 * When we are able to acquire the cache line we only clear the
	 * fields if the vp does not match.  This allows us to multi-zone
	 * a vp and for excessive zones / partial clusters to be retired.
	 */
	i = cc->iterator++ & 3;
	if (atomic_swap_int(&cc->locked, 1) != 0) {
static void
cluster_putcache(cluster_cache_t *cc)
{
	atomic_swap_int(&cc->locked, 0);
}
/*
 * This replaces bread(), providing a synchronous read of the requested
 * buffer plus asynchronous read-ahead within the specified bounds.
 *
 * The caller may pre-populate *bpp if it already has the requested buffer
 * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
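
/*
 * Hypothetical call from a filesystem read path (editor's sketch; the
 * variable names and the seqcount-based heuristic shown are assumptions,
 * not taken from this file):
 *
 *	struct buf *bp = NULL;			must be NULL or pre-populated
 *	error = cluster_readx(vp, filesize, loffset, blksize,
 *			      uio->uio_resid,		minreq
 *			      seqcount * MAXBSIZE,	maxreq heuristic
 *			      &bp);
 */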
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset,
	      int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed by userland issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);
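
	/*
	 * Illustrative numbers (editor's example, values assumed): a 64KB
	 * uio against a 16KB-block file with the default vfs.max_readahead
	 * of 2MB and a 1MB sequential heuristic leaves maxreq = 1MB (under
	 * both the 2MB and 16MB caps), so maxra = maxreq / blksize = 64
	 * read-ahead blocks, further clipped above so loffset + maxreq
	 * never crosses filesize.
	 */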
	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)
		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Typically the way this works is that B_RAM is set in the
		 * middle of the cluster and triggers an overlapping
		 * read-ahead of 1/2 a cluster more blocks.  This ensures
		 * that the cluster read-ahead scales with the read-ahead
		 * count and is thus better-able to absorb the caller's
		 * blocking.
		 *
		 * Estimate where the next unread block will be by assuming
		 * that the B_RAM's are placed at the half-way point.
		 */
		bp->b_flags &= ~B_RAM;

		rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
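
		/*
		 * Editor's illustration of the B_RAM overlap (block numbers
		 * assumed): if maxra is 16, the previous read-ahead marked
		 * block 8 (the half-way point) with B_RAM.  Hitting that
		 * mark here starts the next read-ahead roughly 8 blocks
		 * further in while blocks 8-15 are still cached or
		 * in-flight, so the stream does not stall on the device.
		 */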
		if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
			rbp = findblk(vp, loffset + i * blksize,
				      FINDBLK_TEST);
		}

		rbp = findblk(vp, loffset + i * blksize,
			      FINDBLK_TEST);
		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;

		if (loffset >= filesize)

		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
		/*
		 * Set RAM on first read-ahead block since we still have
		 * approximate maxra/2 blocks ahead of us that are already
		 * cached or in-progress.
		 */
	} else {
		/*
		 * Start block is not valid, we will want to do a
		 * full read-ahead.
		 */
		__debugvar off_t firstread = bp->b_loffset;
		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		nblks = calc_rbuild_reqsize(maxra, maxrbuild);
		/*
		 * Set RAM half-way through the full-cluster.
		 */
		sr = (maxra + 1) / 2;

		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error)
			goto single_block_read;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		if (doffset == NOOFFSET)
			goto single_block_read;
		if (nblks <= 1)
			goto single_block_read;

		bp = cluster_rbuild(vp, filesize, loffset,
				    doffset, blksize, nblks, bp, &sr);
		loffset += bp->b_bufsize;
		maxra -= bp->b_bufsize / blksize;
		/*
		 * If it isn't in the cache, then get a chunk from
		 * disk if sequential, otherwise just get the block.
		 */
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
#if defined(CLUSTERDEBUG)
	kprintf("S(%012jx,%d,%d)\n",
		(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
	if ((bp->b_flags & B_CLUSTER) == 0)
		vfs_busy_pages(vp, bp);
	bp->b_flags &= ~(B_ERROR|B_INVAL);
	vn_strategy(vp, &bp->b_bio1);
#if defined(CLUSTERDEBUG)
	kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
		loffset, blksize, maxra, sr);
#endif
	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 *
	 * Set B_RAM on the first buffer (the next likely offset needing
	 * read-ahead), under the assumption that there are still
	 * approximately maxra/2 blocks good ahead of us.
	 */
	rbp = getblk(vp, loffset, blksize,
		     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
#if defined(CLUSTERDEBUG)
	kprintf("read-ahead %016jx rbp=%p ", loffset, rbp);
#endif
	if ((rbp->b_flags & B_CACHE)) {
		/*
		 * If BMAP is not supported or has an issue, we still do
		 * (maxra) read-ahead, but we do not try to use rbuild.
		 */
		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error || doffset == NOOFFSET) {

		nblks = calc_rbuild_reqsize(maxra, maxrbuild);
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		rbp->b_cmd = BUF_CMD_READ;

		rbp = cluster_rbuild(vp, filesize, loffset,
				     doffset, blksize, nblks, rbp, &sr);
		rbp->b_bio2.bio_offset = doffset;

		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}
/*
 * This replaces breadcb(), providing an asynchronous read of the requested
 * buffer with a callback, plus an asynchronous read-ahead within the
 * specified bounds.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
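
/*
 * Hypothetical completion callback satisfying the contract above (editor's
 * sketch; the function and argument names are made up):
 *
 *	static void
 *	my_read_done(struct bio *bio)
 *	{
 *		struct buf *bp = bio->bio_buf;
 *
 *		if ((bio->bio_flags & BIO_DONE) == 0)
 *			bpdone(bp, 0);
 *		bio->bio_flags &= ~BIO_DONE;
 *		consume bp->b_data here, then release the buffer
 *		bqrelse(bp);
 *	}
 */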
void
cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset,
	       int blksize, size_t minreq, size_t maxreq,
	       void (*func)(struct bio *), void *arg)
{
	struct buf *bp, *rbp, *reqbp;
	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed by userland issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);
	/*
	 * Get the requested block.
	 */
	reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Setup for func() call whether we do read-ahead or not.
		 */
		bp->b_bio1.bio_caller_info1.ptr = arg;
		bp->b_bio1.bio_flags |= BIO_DONE;

		/*
		 * Not sequential, do not do any read-ahead
		 */

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)

		bp->b_flags &= ~B_RAM;
		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))

		if (findblk(vp, loffset + i * blksize,
			    FINDBLK_TEST) == NULL) {
		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;
		/* leave reqbp intact to force function callback */

		if (loffset >= filesize)

		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
	} else {
		/*
		 * bp is not valid, no prior cluster in progress so get a
		 * full cluster read-ahead going.
		 */
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up asynchronous read for bp.
		 */
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL);
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = func;
		bp->b_bio1.bio_caller_info1.ptr = arg;
		reqbp = NULL;	/* don't func() reqbp, it's running async */

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));
		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		nblks = calc_rbuild_reqsize(maxra, maxrbuild);

		/*
		 * Set RAM half-way through the full-cluster.
		 */
		sr = (maxra + 1) / 2;
		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error)
			goto single_block_read;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		if (doffset == NOOFFSET)
			goto single_block_read;
		if (nblks <= 1)
			goto single_block_read;

		bp = cluster_rbuild(vp, filesize, loffset,
				    doffset, blksize, nblks, bp, &sr);
		loffset += bp->b_bufsize;
		maxra -= bp->b_bufsize / blksize;
		/*
		 * If it isn't in the cache, then get a chunk from
		 * disk if sequential, otherwise just get the block.
		 */
	}

	/*
	 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
	 * bp will either be an asynchronous cluster buf or an asynchronous
	 * single-buf.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
#if defined(CLUSTERDEBUG)
	kprintf("S(%012jx,%d,%d)\n",
		(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
	if ((bp->b_flags & B_CLUSTER) == 0)
		vfs_busy_pages(vp, bp);
	bp->b_flags &= ~(B_ERROR|B_INVAL);
	vn_strategy(vp, &bp->b_bio1);
#if defined(CLUSTERDEBUG)
	kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
		loffset, blksize, maxra, sr);
#endif
	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	rbp = getblk(vp, loffset, blksize,
		     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
	if ((rbp->b_flags & B_CACHE)) {
		/*
		 * If BMAP is not supported or has an issue, we still do
		 * (maxra) read-ahead, but we do not try to use rbuild.
		 */
		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error || doffset == NOOFFSET) {

		nblks = calc_rbuild_reqsize(maxra, maxrbuild);
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;
		rbp->b_cmd = BUF_CMD_READ;

		rbp = cluster_rbuild(vp, filesize, loffset,
				     doffset, blksize, nblks, rbp, &sr);
		rbp->b_bio2.bio_offset = doffset;

		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	/*
	 * If reqbp is non-NULL it had B_CACHE set and we issue the
	 * function callback synchronously.
	 *
	 * Note that we may start additional asynchronous I/O before doing
	 * the func() callback for the B_CACHE case.
	 */
	if (reqbp)
		func(&reqbp->b_bio1);
}
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 *
 * (*srp) counts down original blocks to determine where B_RAM should be set.
 * Set B_RAM when *srp drops to 0.  If (*srp) starts at 0, B_RAM will not be
 * set on any buffer.  Make sure B_RAM is cleared on any other buffers to
 * prevent degenerate read-aheads from being generated.
 */
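
/*
 * Example of the (*srp) countdown (editor's illustration): a caller that
 * wants B_RAM placed half-way through a 16 block read-ahead passes sr = 8,
 * as the callers above do with sr = (maxra + 1) / 2.  cluster_rbuild()
 * counts sr down once per original block, sets B_RAM on the buffer where
 * it reaches 0, and clears B_RAM on the others so only one read-ahead
 * mark exists in the cluster.
 */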
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp, int *srp)
{
	struct buf *bp, *tbp;
	int maxiosize = vmaxiosize(vp);
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}
	bp = trypbuf_kva(&cluster_pbuf_freecnt);

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_xio.xio_npages = 0;
	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if ((bp->b_xio.xio_npages * PAGE_SIZE) +
		    round_page(blksize) > maxiosize) {
			break;
		}

		/*
		 * Shortcut some checks and try to avoid buffers that
		 * would block in the lock.  The same checks have to
		 * be made again after we officially get the buffer.
		 */
		tbp = getblk(vp, loffset + i * blksize, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		for (j = 0; j < tbp->b_xio.xio_npages; j++) {
			if (tbp->b_xio.xio_pages[j]->valid)
				break;
		}
		if (j != tbp->b_xio.xio_npages) {
		/*
		 * Stop scanning if the buffer is fully valid
		 * (marked B_CACHE), or locked (may be doing a
		 * background write), or if the buffer is not
		 * VMIO backed.  The clustering code can only deal
		 * with VMIO-backed buffers.
		 */
		if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
		    (tbp->b_flags & B_VMIO) == 0 ||
		    (LIST_FIRST(&tbp->b_dep) != NULL &&
		/*
		 * The buffer must be completely invalid in order to
		 * take part in the cluster.  If it is partially valid
		 * then we stop.
		 */
		for (j = 0; j < tbp->b_xio.xio_npages; j++) {
			if (tbp->b_xio.xio_pages[j]->valid)
				break;
		}
		if (j != tbp->b_xio.xio_npages) {
		/*
		 * Depress the priority of buffers not explicitly
		 * caching.
		 */
		/* tbp->b_flags |= B_AGE; */
		/*
		 * Set the block number if it isn't set, otherwise
		 * if it is make sure it matches the block number we
		 * expect.
		 */
		if (tbp->b_bio2.bio_offset == NOOFFSET) {
			tbp->b_bio2.bio_offset = boffset;
		} else if (tbp->b_bio2.bio_offset != boffset) {
		/*
		 * Set B_RAM if (*srp) is 1.  B_RAM is only set on one buffer
		 * in the cluster, including potentially the first buffer
		 * once we start streaming the read-aheads.
		 */
		cluster_setram(tbp);
		cluster_clrram(tbp);

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			m = tbp->b_xio.xio_pages[j];
			vm_page_busy_wait(m, FALSE, "clurpg");
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * blksize.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n",
				tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n",
				tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page.
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);

	return (bp);
}
/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;
	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		     bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IOISSUED;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			tbp->b_flags |= B_IOISSUED;

			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}

		/*
		 * XXX I think biodone() below will do this, but do
		 * it here anyway for consistency.
		 */
		if (tbp->b_cmd == BUF_CMD_WRITE)
			bundirty(tbp);
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}
/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 *
 * In addition, write_behind is only activated for files that have
 * grown past a certain size (default 10MB).  Otherwise temporary files
 * wind up generating a lot of unnecessary disk I/O.
 */
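
/*
 * For example, write-behind can be tuned at run-time through the sysctls
 * defined above (editor's illustration; the 32MB value is just an example):
 *
 *	sysctl vfs.write_behind=0
 *	sysctl vfs.write_behind_minfilesize=33554432
 */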
static int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		if (vp->v_filesize >= write_behind_minfilesize) {
			r = cluster_wbuild(vp, NULL, blksize,
					   start_loffset, len);
		}
		break;
	default:
		break;
	}
	return(r);
}
/*
 * Do clustered write for FFS.
 *
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 *
 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
 */
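
/*
 * Editor's illustration of the cases above for a purely sequential writer
 * (offsets assumed): the write at offset 0 begins a cluster (v_cstart = 0,
 * v_clen sized from VOP_BMAP), writes inside (v_cstart, v_clen) are simply
 * delayed and appended, and the write landing at v_cstart + v_clen pushes
 * the accumulated run through cluster_wbuild_wb() before the next cluster
 * is started.
 */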
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	int maxclen, cursize;
	cluster_cache_t dummy;
	cluster_cache_t *cc;

	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	cc = cluster_getcache(&dummy, vp, loffset);
	/*
	 * Initialize vnode to beginning of file.
	 */
	if (loffset == 0)
		cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
	if (cc->v_clen == 0 || loffset != cc->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != cc->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (cc->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon.
			 */
			cursize = cc->v_lastw - cc->v_cstart + blksize;
			if (bp->b_loffset + blksize < filesize ||
			    loffset != cc->v_lastw + blksize ||
			    cc->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
							  cc->v_cstart,
							  cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(cc, vp,
							      bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 *
					 * NOTE: We do not brelse the last
					 *	 element which is bp, and we
					 *	 do not return here.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					cluster_wbuild_wb(vp,
							  blksize,
							  cc->v_cstart,
							  cursize);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					cc->v_lastw = loffset;
					cc->v_lasta = bp->b_bio2.bio_offset;
					cluster_putcache(cc);
					return;
				}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize < filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			cc->v_lasta = bp->b_bio2.bio_offset;
			cc->v_cstart = loffset + blksize;
			cc->v_lastw = loffset;
			cluster_putcache(cc);
			return;
		}
		if (maxclen > blksize)
			cc->v_clen = maxclen - blksize;
		else
			cc->v_clen = 0;
		if (!async && cc->v_clen == 0) {	/* I/O not contiguous */
			cc->v_cstart = loffset + blksize;
		} else {				/* Wait for rest of cluster */
			cc->v_cstart = loffset;
		}
	} else if (loffset == cc->v_cstart + cc->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		cluster_wbuild_wb(vp, blksize, cc->v_cstart,
				  cc->v_clen + blksize);
		cc->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe() &&
		   bp->b_loffset + blksize < filesize) {
		/*
		 * We are low on memory, get it going NOW.  However, do not
		 * try to push out a partial block at the end of the file
		 * as this could lead to extremely non-optimal write activity.
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	cc->v_lastw = loffset;
	cc->v_lasta = bp->b_bio2.bio_offset;
	cluster_putcache(cc);
}
/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
int
cluster_awrite(struct buf *bp)
{
	int total;

	/*
	 * Don't bother if it isn't clusterable.
	 */
	if ((bp->b_flags & B_CLUSTEROK) == 0 ||
	    (bp->b_vp->v_flag & VOBJBUF) == 0) {
		total = bp->b_bufsize;
		bawrite(bp);
		return (total);
	}

	total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
			       bp->b_loffset, vmaxiosize(bp->b_vp));
	if (bp)
		bawrite(bp);

	return (total);
}
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
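
/*
 * Example of the *bpp contract (editor's sketch, mirroring cluster_awrite()
 * above): the caller passes a locked, removed buffer and must write it
 * itself if cluster_wbuild() could not fold it into a cluster:
 *
 *	total = cluster_wbuild(vp, &bp, bp->b_bufsize, bp->b_loffset,
 *			       vmaxiosize(vp));
 *	if (bp)			not incorporated, *bpp left intact
 *		bawrite(bp);
 */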
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
	       int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);
	while (bytes > 0) {
		/*
		 * If the buffer matches the passed locked & removed buffer
		 * we used the passed buffer (which might not be B_DELWRI).
		 *
		 * Otherwise locate the buffer and determine if it is
		 * eligible for clustering.
		 */
		if (bpp && (*bpp)->b_loffset == start_loffset) {
		} else {
			tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
			if (tbp == NULL ||
			    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
			     B_DELWRI ||
			    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
				start_loffset += blksize;
				continue;
			}
		}
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			continue;
		}
		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
				      ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			       (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 *
		 * IO *must* be initiated on index 0 at this point
		 * (particularly when called from cluster_awrite()).
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			tbp = findblk(vp, start_loffset,
				      FINDBLK_NBLOCK);
			/*
			 * Buffer not found or could not be locked
			 * non-blocking.
			 */

			/*
			 * If it IS in core, but has different
			 * characteristics, then don't cluster
			 * with it.
			 */
			if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
			     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
			    != (B_DELWRI | B_CLUSTEROK |
			     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
			    (tbp->b_flags & B_LOCKED)
			/*
			 * Check that the combined cluster
			 * would make sense with regard to pages
			 * and would not be too large
			 *
			 * WARNING! buf_checkwrite() must be the last
			 *	    check made.  If it returns 0 then
			 *	    we must initiate the I/O.
			 */
			if ((tbp->b_bcount != blksize) ||
			    ((bp->b_bio2.bio_offset + i) !=
			      tbp->b_bio2.bio_offset) ||
			    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
			      (maxiosize / PAGE_SIZE)) ||
			    (LIST_FIRST(&tbp->b_dep) &&
			     buf_checkwrite(tbp))) {
				BUF_UNLOCK(tbp);
				break;
			}

			if (LIST_FIRST(&tbp->b_dep))
			/*
			 * Ok, it's passed all the tests,
			 * so remove it from the free list
			 * and mark it busy.  We will use it.
			 */
			KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				/*
				 * Try to avoid deadlocks with the VM system.
				 * However, we cannot abort the I/O if
				 * must_initiate is non-zero.
				 */
				if (must_initiate == 0) {
					for (j = 0;
					     j < tbp->b_xio.xio_npages;
					     ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_busy_wait(m, FALSE, "clurpg");
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;
			/*
			 * NOTE: see bwrite/bawrite code for why we no longer
			 *	 undirty tbp here.
			 *
			 *	 bundirty(tbp); REMOVED
			 */
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			cluster_append(&bp->b_bio1, tbp);
			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}

		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    (vm_page_t *)bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic("cluster_wbuild: b_bufsize(%d) "
			      "> b_kvasize(%d)\n",
			      bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bsetrunningbufspace(bp, bp->b_bufsize);
		vn_strategy(vp, &bp->b_bio1);
	}
	return totalwritten;
}
/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed-in.
 *
 * Only pre-existing buffers whose block size matches blksize are collected.
 * (this is primarily because HAMMER1 uses varying block sizes and we don't
 * want to override its choices).
 *
 * This code will not try to collect buffers that it cannot lock, otherwise
 * it might deadlock against SMP-friendly filesystems.
 */
static struct cluster_save *
cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
		    struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	len = (int)(cc->v_lastw - cc->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = cc->v_cstart, i = 0, j = 0;
	     i < len;
	     (loffset += blksize), i++) {
		bp = getcacheblk(vp, loffset,
				 last_bp->b_bcount, GETBLK_SZMATCH |
						    GETBLK_NOWAIT);
		buflist->bs_children[i] = bp;
		} else if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	for (k = 0; k < j; ++k) {
		if (buflist->bs_children[k]) {
			bqrelse(buflist->bs_children[k]);
			buflist->bs_children[k] = NULL;
		}
	}
	bcopy(buflist->bs_children + j,
	      buflist->bs_children + 0,
	      sizeof(buflist->bs_children[0]) * (i - j));
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
static void
cluster_setram(struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}
static void
cluster_clrram(struct buf *bp)
{
	bp->b_flags &= ~B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);
}