 * The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 * 	Copyright (c) 2012-2013 Matthew Dillon.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#include <vm/vm_page2.h>

#include <machine/limits.h>
/*
 * Cluster tracking cache - replaces the original vnode v_* fields which had
 * limited utility and were not MP safe.
 *
 * The cluster tracking cache is a simple 4-way set-associative non-chained
 * cache.  It is capable of tracking up to four zones separated by 1MB or
 * more per vnode.
 *
 * NOTE: We want this structure to be cache-line friendly so the iterator
 *	 is embedded rather than in a separate array.
 *
 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
 *	 For now we treat the values as heuristic but also self-consistent,
 *	 i.e. the values cannot be completely random and cannot be SMP-unsafe
 *	 or the cluster code might end up clustering non-contiguous buffers
 *	 at the wrong offsets.
 */
struct cluster_cache {
	struct vnode *vp;		/* vnode the entry tracks */
	u_int	locked;			/* simple atomic lock on the entry */
	off_t	v_lastw;		/* last write (end) (write cluster) */
	off_t	v_cstart;		/* start block (beg) of cluster */
	off_t	v_lasta;		/* last allocation (end) */
	u_int	v_clen;			/* length of current cluster */
	u_int	iterator;		/* embedded 4-way set iterator */
};

typedef struct cluster_cache cluster_cache_t;

#define CLUSTER_CACHE_SIZE	512
#define CLUSTER_CACHE_MASK	(CLUSTER_CACHE_SIZE - 1)

#define CLUSTER_ZONE		((off_t)(1024 * 1024))

cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
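
/*
 * Illustrative note (added, not part of the original source): two file
 * offsets fall in the same 1MB cluster zone exactly when they agree in
 * everything above the low 20 bits, i.e.
 * ((off1 ^ off2) & ~(CLUSTER_ZONE - 1)) == 0, which is the test
 * cluster_getcache() uses below.
 */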
#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif
static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
				struct buf *last_bp, int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			    off_t doffset, int blksize, int run,
			    struct buf *fbp, int *srp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);
static void cluster_clrram (struct buf *);
static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
			    off_t start_loffset, int bytes);
static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
    &write_behind_minfilesize, 0, "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t bogus_page;
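
/*
 * Note (added for clarity): bogus_page is substituted for any page that is
 * already fully valid when a cluster buffer is built, so the device read
 * does not overwrite good data.  See the VM_PAGE_BITS_ALL checks in
 * cluster_rbuild() below.
 */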
/*
 * nblks is our cluster_rbuild request size.  The approximate number of
 * physical read-ahead requests is maxra / nblks.  The physical request
 * size is limited by the device (maxrbuild).  We also do not want to make
 * the request size too big or it will mess up the B_RAM streaming.
 */
static __inline
int
calc_rbuild_reqsize(int maxra, int maxrbuild)
{
	int nblks;

	if ((nblks = maxra / 4) > maxrbuild)
		nblks = maxrbuild;
	if (nblks < 1)
		nblks = maxra;
	return nblks;
}
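
/*
 * Illustrative example (added): with maxra = 64 blocks and a device limit
 * of maxrbuild = 32, nblks = 64 / 4 = 16, so read-ahead is issued as
 * roughly four physical requests of 16 blocks each rather than one huge
 * request that would defeat the B_RAM streaming.
 */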
/*
 * Acquire/release cluster cache (can return dummy entry)
 */
static cluster_cache_t *
cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
{
	cluster_cache_t *cc;
	size_t hv;
	int i;
	int xact;

	hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
	hv &= CLUSTER_CACHE_MASK & ~3;
	cc = &cluster_array[hv];
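
	/*
	 * Note (added): the hash is masked to a multiple of 4, so cc always
	 * points at the base of a 4-entry set; entries cc[0]..cc[3] form the
	 * set scanned below.
	 */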
	xact = -1;
	for (i = 0; i < 4; ++i) {
		if (cc[i].vp != vp)
			continue;
		if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
			xact = i;
			break;
		}
	}
	if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
		if (cc[xact].vp == vp &&
		    ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
			return(&cc[xact]);
		}
		atomic_swap_int(&cc[xact].locked, 0);
	}

	/*
	 * New entry.  If we can't acquire the cache line then use the
	 * passed-in dummy element and reset all fields.
	 *
	 * When we are able to acquire the cache line we only clear the
	 * fields if the vp does not match.  This allows us to multi-zone
	 * a vp and for excessive zones / partial clusters to be retired.
	 */
	i = cc->iterator++ & 3;
	cc += i;
	if (atomic_swap_int(&cc->locked, 1) != 0) {
		cc = dummy;
		cc->locked = 1;
		cc->vp = NULL;
	}
	if (cc->vp != vp) {
		cc->vp = vp;
		cc->v_lasta = 0;
		cc->v_clen = 0;
		cc->v_cstart = 0;
		cc->v_lastw = 0;
	}
	return(cc);
}

static void
cluster_putcache(cluster_cache_t *cc)
{
	atomic_swap_int(&cc->locked, 0);
}
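
/*
 * Illustrative usage sketch (added, not part of the original file): callers
 * such as cluster_write() bracket their updates with the pair above, e.g.
 *
 *	cluster_cache_t dummy;
 *	cluster_cache_t *cc;
 *
 *	cc = cluster_getcache(&dummy, vp, loffset);
 *	... read/update cc->v_cstart, cc->v_lastw, cc->v_clen ...
 *	cluster_putcache(cc);
 *
 * The returned entry may be the dummy, so the values are heuristics only.
 */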
/*
 * This replaces bread(), providing a synchronous read of the requested
 * buffer plus asynchronous read-ahead within the specified bounds.
 *
 * The caller may pre-populate *bpp if it already has the requested buffer
 * in-hand, else must set *bpp to NULL.  Note that the cluster_read() inline
 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
int
cluster_readx(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
	      int bflags, size_t minreq, size_t maxreq,
	      struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra;
	int maxrbuild;
	int sr;
	int nblks;
	int burstbytes;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed by userland issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	if (maxreq < minreq)
		maxreq = minreq;
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);
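
	/*
	 * Illustrative example (added): with blksize = 16KB and the default
	 * max_readahead of 2MB, a fully sequential stream ends up with
	 * maxra = 2MB / 16KB = 128 blocks of desired read-ahead.
	 */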

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */
		if (maxra <= 1)
			return 0;

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0)
			return 0;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Typically the way this works is that B_RAM is set in the
		 * middle of the cluster and triggers an overlapping
		 * read-ahead of 1/2 a cluster more blocks.  This ensures
		 * that the cluster read-ahead scales with the read-ahead
		 * count and is thus better able to absorb the caller's
		 * transaction size.
		 *
		 * Estimate where the next unread block will be by assuming
		 * that the B_RAM's are placed at the half-way point.
		 */
		bp->b_flags &= ~B_RAM;
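
		/*
		 * Worked example (added): with maxra = 128 the read-ahead
		 * mark sits roughly 64 blocks before the end of what was
		 * previously issued, so hitting it here starts new
		 * read-ahead while approximately maxra/2 blocks are still
		 * cached or in-progress ahead of the caller.
		 */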
		i = maxra / 2;
		rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
		if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
			/* scan backwards for the end of the cached run */
			while (i) {
				--i;
				rbp = findblk(vp, loffset + i * blksize,
					      FINDBLK_TEST);
				if (rbp && (rbp->b_flags & B_CACHE)) {
					++i;
					break;
				}
			}
		} else {
			/* scan forwards for the first missing block */
			while (i < maxra) {
				rbp = findblk(vp, loffset + i * blksize,
					      FINDBLK_TEST);
				if (rbp == NULL ||
				    (rbp->b_flags & B_CACHE) == 0) {
					break;
				}
				++i;
			}
		}

		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */
		if (i >= maxra)
			return 0;

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;
		reqbp = bp = NULL;

		if (loffset >= filesize)
			return 0;
		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}

		/*
		 * Set RAM on first read-ahead block since we still have
		 * approximate maxra/2 blocks ahead of us that are already
		 * cached or in-progress.
		 */
		sr = 1;
	} else {
		/*
		 * Start block is not valid, we will want to do a
		 * full read-ahead.
		 */
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		nblks = calc_rbuild_reqsize(maxra, maxrbuild);

		/*
		 * Set RAM half-way through the full-cluster.
		 */
		sr = (maxra + 1) / 2;

		if (nblks > 1) {
			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (nblks <= 1)
				goto single_block_read;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks,
					    bp, &sr);
			loffset += bp->b_bufsize;
			maxra -= bp->b_bufsize / blksize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			/* ... */
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
				(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		vn_strategy(vp, &bp->b_bio1);
		/* bp invalid now */
	}

#if defined(CLUSTERDEBUG)
	if (rcluster)
		kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
			(intmax_t)loffset, blksize, maxra, sr);
#endif

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 *
	 * Set B_RAM on the first buffer (the next likely offset needing
	 * read-ahead), under the assumption that there are still
	 * approximately maxra/2 blocks good ahead of us.
	 */
	while (maxra > 0) {
		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
#if defined(CLUSTERDEBUG)
		if (rcluster) {
			kprintf("read-ahead %016jx rbp=%p ",
				(intmax_t)loffset, rbp);
		}
#endif
		if (rbp == NULL)
			break;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			break;
		}

		/*
		 * If BMAP is not supported or has an issue, we still do
		 * (maxra) read-ahead, but we do not try to use rbuild.
		 */
		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error || doffset == NOOFFSET) {
			nblks = 1;
			doffset = NOOFFSET;
		} else {
			nblks = calc_rbuild_reqsize(maxra, maxrbuild);
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
		}
		if (nblks > 1) {
			rbp->b_cmd = BUF_CMD_READ;

			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize, nblks,
					     rbp, &sr);
		} else {
			rbp->b_cmd = BUF_CMD_READ;
			rbp->b_bio2.bio_offset = doffset;
			if (--sr == 0)
				cluster_setram(rbp);
		}

		rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
		rbp->b_flags |= bflags;

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	} else {
		error = 0;
	}
	return error;
}

/*
 * This replaces breadcb(), providing an asynchronous read of the requested
 * buffer with a callback, plus an asynchronous read-ahead within the
 * specified bounds.
 *
 * The callback must check whether BIO_DONE is set in the bio and issue
 * the bpdone(bp, 0) if it isn't.  The callback is responsible for clearing
 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typ ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
void
cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
	       int bflags, size_t minreq, size_t maxreq,
	       void (*func)(struct bio *), void *arg)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int i;
	int maxra;
	int maxrbuild;
	int sr;
	int error;
	int nblks;
	int burstbytes;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where random I/O is being
	 * performed by userland issuing big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	if (maxreq < minreq)
		maxreq = minreq;
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);

	/*
	 * Get the requested block.
	 */
	reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Setup for func() call whether we do read-ahead or not.
		 */
		bp->b_bio1.bio_caller_info1.ptr = arg;
		bp->b_bio1.bio_flags |= BIO_DONE;

		/*
		 * Not sequential, do not do any read-ahead
		 */

		/*
		 * No read-ahead mark, do not do any read-ahead
		 */
		if ((bp->b_flags & B_RAM) == 0) {
			/* ... */
		}
		bp->b_flags &= ~B_RAM;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			i = maxra - 2;
		else
			i = 1;
		while (i < maxra) {
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {
				break;
			}
			++i;
		}

		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */

		/*
		 * Calculate where to start the read-ahead and how much
		 * to do.  Generally speaking we want to read-ahead by
		 * (maxra) when we've found a read-ahead mark.  We do
		 * not want to reduce maxra here as it will cause
		 * successive read-ahead I/O's to be smaller and smaller.
		 *
		 * However, we have to make sure we don't break the
		 * filesize limitation for the clustered operation.
		 */
		loffset += i * blksize;
		bp = NULL;
		/* leave reqbp intact to force function callback */

		if (loffset >= filesize) {
			/* ... */
		}
		if (loffset + maxra * blksize > filesize) {
			maxreq = filesize - loffset;
			maxra = (int)(maxreq / blksize);
		}
	} else {
		/*
		 * bp is not valid, no prior cluster in progress so get a
		 * full cluster read-ahead going.
		 */
		__debugvar off_t firstread = bp->b_loffset;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = func;
		bp->b_bio1.bio_caller_info1.ptr = arg;
		reqbp = NULL;	/* don't func() reqbp, it's running async */

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		nblks = calc_rbuild_reqsize(maxra, maxrbuild);

		/*
		 * Set RAM half-way through the full-cluster.
		 */
		sr = (maxra + 1) / 2;

		if (nblks > 1) {
			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (nblks <= 1)
				goto single_block_read;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks,
					    bp, &sr);
			loffset += bp->b_bufsize;
			maxra -= bp->b_bufsize / blksize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			/* ... */
		}
	}

	/*
	 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
	 * bp will either be an asynchronous cluster buf or an asynchronous
	 * single-buf.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
				(intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
		bp->b_flags |= bflags;
		vn_strategy(vp, &bp->b_bio1);
		/* bp invalid now */
	}

#if defined(CLUSTERDEBUG)
	if (rcluster)
		kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
			(intmax_t)loffset, blksize, maxra, sr);
#endif

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (maxra > 0) {
		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			break;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			break;
		}

		/*
		 * If BMAP is not supported or has an issue, we still do
		 * (maxra) read-ahead, but we do not try to use rbuild.
		 */
		error = VOP_BMAP(vp, loffset, &doffset,
				 &burstbytes, NULL, BUF_CMD_READ);
		if (error || doffset == NOOFFSET) {
			nblks = 1;
			doffset = NOOFFSET;
		} else {
			nblks = calc_rbuild_reqsize(maxra, maxrbuild);
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
		}
		rbp->b_cmd = BUF_CMD_READ;

		if (nblks > 1) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize, nblks,
					     rbp, &sr);
		} else {
			rbp->b_bio2.bio_offset = doffset;
			if (--sr == 0)
				cluster_setram(rbp);
		}

		rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
		rbp->b_flags |= bflags;

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * If reqbp is non-NULL it had B_CACHE set and we issue the
	 * function callback synchronously.
	 *
	 * Note that we may start additional asynchronous I/O before doing
	 * the func() callback for the B_CACHE case
	 */
	if (reqbp)
		func(&reqbp->b_bio1);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 *
 * (*srp) counts down original blocks to determine where B_RAM should be set.
 * Set B_RAM when *srp drops to 0.  If (*srp) starts at 0, B_RAM will not be
 * set on any buffer.  Make sure B_RAM is cleared on any other buffers to
 * prevent degenerate read-aheads from being generated.
 */
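
/*
 * Illustrative note (added): a caller that wants the read-ahead mark half
 * way through a 16-block cluster passes *srp = 8; the 8th component buffer
 * built below gets B_RAM/PG_RAM and *srp is left at 0 so no further marks
 * are placed in this cluster.
 */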
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp, int *srp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	while (loffset + run * blksize > filesize) {
		if (--run <= 1)
			return fbp;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

	/*
	 * Get a pbuf, limit cluster I/O on a per-device basis.  If
	 * doing cluster I/O for a file, limit cluster I/O on a
	 * per-mount basis.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		bp = trypbuf_kva(&vp->v_pbuf_count);
	else
		bp = trypbuf_kva(&vp->v_mount->mnt_pbuf_count);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
			      ((vm_offset_t)tbp->b_data & PAGE_MASK));
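
	/*
	 * Illustrative example (added): with a 6KB blksize the original
	 * buffer may begin 2KB into its first page; OR-ing in
	 * (tbp->b_data & PAGE_MASK) reproduces that 2KB offset inside the
	 * pbuf's KVA so the cluster maps the same bytes.
	 */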
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;
= doffset
, i
= 0; i
< run
; ++i
, boffset
+= blksize
) {
954 if ((bp
->b_xio
.xio_npages
* PAGE_SIZE
) +
955 round_page(blksize
) > maxiosize
) {

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkwrite(tbp))) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Depress the priority of buffers not explicitly
			 * caching us.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				bqrelse(tbp);
				break;
			}
		}

		/*
		 * Set B_RAM if (*srp) is 1.  B_RAM is only set on one buffer
		 * in the cluster, including potentially the first buffer
		 * once we start streaming the read-aheads.
		 */
		if (--*srp == 0)
			cluster_setram(tbp);
		else
			cluster_clrram(tbp);

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;

		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;

			m = tbp->b_xio.xio_pages[j];
			vm_page_busy_wait(m, FALSE, "clurpg");
			vm_page_io_start(m);
			vm_page_wakeup(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
			    (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
				tbp->b_xio.xio_pages[j] = bogus_page;
				tbp->b_flags |= B_HASBOGUS;
			}
		}

		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * blksize.
		 */
		if (tbp->b_bcount != blksize)
			kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
			kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 * instead.
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
			bp->b_flags |= B_HASBOGUS;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		      bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
		    (vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
static void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	struct vnode *vp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

	pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
		     bp->b_xio.xio_npages);

	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IOISSUED;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR | B_INVAL);
			if (tbp->b_cmd == BUF_CMD_READ) {
				tbp->b_flags = (tbp->b_flags & ~B_NOTMETA) |
					       (bp->b_flags & B_NOTMETA);
			}
			tbp->b_flags |= B_IOISSUED;

			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;

			/*
			 * XXX I think biodone() below will do this, but do
			 * it here anyway for consistency.
			 */
			if (tbp->b_cmd == BUF_CMD_WRITE)
				bundirty(tbp);
		}
		biodone(&tbp->b_bio1);
	}
	vp = bp->b_vp;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		relpbuf(bp, &vp->v_pbuf_count);
	else
		relpbuf(bp, &vp->v_mount->mnt_pbuf_count);
}

/*
 * Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 *
 * In addition, write_behind is only activated for files that have
 * grown past a certain size (default 10MB).  Otherwise temporary files
 * wind up generating a lot of unnecessary disk I/O.
 */
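
/*
 * Illustrative note (added): with the defaults (vfs.write_behind = 1,
 * vfs.write_behind_minfilesize = 10MB), a file that has grown past 10MB
 * has each completed cluster pushed to disk as the next one starts;
 * setting vfs.write_behind to 2 first backs the window off by one cluster
 * length, and 0 disables the push entirely.
 */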
static int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		if (vp->v_filesize >= write_behind_minfilesize) {
			r = cluster_wbuild(vp, NULL, blksize,
					   start_loffset, len);
		}
		break;
	default:
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 *
 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;
	cluster_cache_t dummy;
	cluster_cache_t *cc;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

	cc = cluster_getcache(&dummy, vp, loffset);

	/*
	 * Initialize vnode to beginning of file.
	 */
	if (loffset == 0)
		cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;

	if (cc->v_clen == 0 || loffset != cc->v_lastw ||
	    (bp->b_bio2.bio_offset != NOOFFSET &&
	     (bp->b_bio2.bio_offset != cc->v_lasta))) {
		/*
		 * Next block is not logically sequential, or, if physical
		 * block offsets are available, not physically sequential.
		 *
		 * If physical block offsets are not available we only
		 * get here if we weren't logically sequential.
		 */
		maxclen = vmaxiosize(vp);
		if (cc->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = cc->v_lastw - cc->v_cstart;
			if (bp->b_loffset + blksize < filesize ||
			    loffset != cc->v_lastw ||
			    cc->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						cc->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(cc, vp,
							      bp, blksize);
				endbp = &buflist->bs_children
					[buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 *
					 * NOTE: We do not brelse the last
					 *	 element which is bp, and we
					 *	 do not return here.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, cc->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					cc->v_lastw = loffset + blksize;
					cc->v_lasta = bp->b_bio2.bio_offset +
						      blksize;
					cluster_putcache(cc);
					return;
				}
			}
		}

		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize < filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset,
			      &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			/* ... */
			cc->v_lasta = bp->b_bio2.bio_offset + blksize;
			cc->v_cstart = loffset;
			cc->v_lastw = loffset + blksize;
			cluster_putcache(cc);
			return;
		}
		if (maxclen > blksize)
			cc->v_clen = maxclen;
		else
			cc->v_clen = blksize;
		if (!async && cc->v_clen == 0) {	/* I/O not contiguous */
			cc->v_cstart = loffset;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			cc->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == cc->v_cstart + cc->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (!async && seqcount > 0) {
			cluster_wbuild_wb(vp, blksize, cc->v_cstart,
					  cc->v_clen + blksize);
		}
		cc->v_clen = 0;
		cc->v_cstart = loffset;
	} else if (vm_page_count_severe() &&
		   bp->b_loffset + blksize < filesize) {
		/*
		 * We are low on memory, get it going NOW.  However, do not
		 * try to push out a partial block at the end of the file
		 * as this could lead to extremely non-optimal write activity.
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	cc->v_lastw = loffset + blksize;
	cc->v_lasta = bp->b_bio2.bio_offset + blksize;
	cluster_putcache(cc);
}

/*
 * This is the clustered version of bawrite().  It works similarly to
 * cluster_write() except I/O on the buffer is guaranteed to occur.
 */
int
cluster_awrite(struct buf *bp)
{
	int total;

	/*
	 * Don't bother if it isn't clusterable.
	 */
	if ((bp->b_flags & B_CLUSTEROK) == 0 ||
	    bp->b_vp == NULL ||
	    (bp->b_vp->v_flag & VOBJBUF) == 0) {
		total = bp->b_bufsize;
		bawrite(bp);
		return (total);
	}

	total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
			       bp->b_loffset, vmaxiosize(bp->b_vp));

	/*
	 * If bp is still non-NULL then cluster_wbuild() did not initiate
	 * I/O on it and we must do so here to provide the API guarantee.
	 */
	if (bp)
		bawrite(bp);

	return (total);
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 *
 * cluster_wbuild() normally does not guarantee anything.  If bpp is
 * non-NULL and cluster_wbuild() is able to incorporate it into the
 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
 * the caller must dispose of *bpp.
 */
static int
cluster_wbuild(struct vnode *vp, struct buf **bpp,
	       int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int must_initiate;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer matches the passed locked & removed buffer
		 * we use the passed buffer (which might not be B_DELWRI).
		 *
		 * Otherwise locate the buffer and determine if it is
		 * eligible for clustering.
		 */
		if (bpp && (*bpp)->b_loffset == start_loffset) {
			tbp = *bpp;
			*bpp = NULL;
			bpp = NULL;
		} else {
			tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
			if (tbp == NULL ||
			    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
			     B_DELWRI ||
			    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
				if (tbp)
					BUF_UNLOCK(tbp);
				start_loffset += blksize;
				bytes -= blksize;
				continue;
			}
		}
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Get a pbuf, limit cluster I/O on a per-device basis.  If
		 * doing cluster I/O for a file, limit cluster I/O on a
		 * per-mount basis.
		 *
		 * HAMMER and other filesystems may attempt to queue a massive
		 * amount of write I/O, using trypbuf() here easily results in
		 * situation where the I/O stream becomes non-clustered.
		 */
		if (vp->v_type == VCHR || vp->v_type == VBLK)
			bp = getpbuf_kva(&vp->v_pbuf_count);
		else
			bp = getpbuf_kva(&vp->v_mount->mnt_pbuf_count);

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
				      ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~(B_ERROR | B_NOTMETA);
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			       (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT |
						B_NOTMETA));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 *
		 * IO *must* be initiated on index 0 at this point
		 * (particularly when called from cluster_awrite()).
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i == 0) {
				must_initiate = 1;
				/* the first buffer (tbp) is already locked */
			} else {
				must_initiate = 0;
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				      B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED)
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 *
				 * WARNING! buf_checkwrite() must be the last
				 *	    check made.  If it returns 0 then
				 *	    we must initiate the I/O.
				 */
				if ((tbp->b_bcount != blksize) ||
				    ((bp->b_bio2.bio_offset + i) !=
				     tbp->b_bio2.bio_offset) ||
				    ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				     (maxiosize / PAGE_SIZE)) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}
				if (LIST_FIRST(&tbp->b_dep)) {
					/* ... */
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			}

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				/*
				 * Try to avoid deadlocks with the VM system.
				 * However, we cannot abort the I/O if
				 * must_initiate is non-zero.
				 */
				if (must_initiate == 0) {
					for (j = 0;
					     j < tbp->b_xio.xio_npages;
					     ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_busy_wait(m, FALSE, "clurpg");
					vm_page_io_start(m);
					vm_page_wakeup(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					    (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

			/*
			 * NOTE: see bwrite/bawrite code for why we no longer
			 *	 undirty tbp here.
			 *
			 *	 bundirty(tbp); REMOVED
			 */
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;

			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
finishcluster:
		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
			    (vm_page_t *)bp->b_xio.xio_pages,
			    bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic("cluster_wbuild: b_bufsize(%d) "
			      "> b_kvasize(%d)\n",
			      bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

		vfs_busy_pages(vp, bp);
		bsetrunningbufspace(bp, bp->b_bufsize);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed-in.
 *
 * Only pre-existing buffers whose block size matches blksize are collected.
 * (this is primarily because HAMMER1 uses varying block sizes and we don't
 * want to override its choices).
 *
 * This code will not try to collect buffers that it cannot lock, otherwise
 * it might deadlock against SMP-friendly filesystems.
 */
static struct cluster_save *
cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
		    struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, j, k, len;

	len = (int)(cc->v_lastw - cc->v_cstart) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			  M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (loffset = cc->v_cstart, i = 0, j = 0;
	     i < len;
	     (loffset += blksize), i++) {
		bp = getcacheblk(vp, loffset,
				 last_bp->b_bcount, GETBLK_SZMATCH |
						    GETBLK_NOWAIT);
		buflist->bs_children[i] = bp;
		if (bp == NULL) {
			j = i + 1;
		} else if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}

	/*
	 * Get rid of gaps (buffers which could not be obtained are
	 * released and the array is compacted).
	 */
	for (k = 0; k < j; ++k) {
		if (buflist->bs_children[k]) {
			bqrelse(buflist->bs_children[k]);
			buflist->bs_children[k] = NULL;
		}
	}
	if (j != 0) {
		bcopy(buflist->bs_children + j,
		      buflist->bs_children + 0,
		      sizeof(buflist->bs_children[0]) * (i - j));
		i -= j;
	}

	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
static void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}
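
/*
 * Note (added for clarity): cluster_head/cluster_tail implement a simple
 * singly-linked FIFO of component buffers hanging off the cluster pbuf's
 * bio; cluster_callback() later walks cluster_head in the same order to
 * biodone() each component.
 */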
static void
cluster_setram(struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}

static void
cluster_clrram(struct buf *bp)
{
	bp->b_flags &= ~B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);
}