kernel - refactor vm_page busy
[dragonfly.git] / sys/kern/vfs_cluster.c
blob 5a6f1e20f326b79f724475c4b229ae2cb61b8a70
1 /*-
2 * Copyright (c) 1993
3 * The Regents of the University of California. All rights reserved.
4 * Modifications/enhancements:
5 * Copyright (c) 1995 John S. Dyson. All rights reserved.
6 * Copyright (c) 2012-2013 Matthew Dillon. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
33 #include "opt_debug_cluster.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/proc.h>
39 #include <sys/buf.h>
40 #include <sys/vnode.h>
41 #include <sys/malloc.h>
42 #include <sys/mount.h>
43 #include <sys/resourcevar.h>
44 #include <sys/vmmeter.h>
45 #include <vm/vm.h>
46 #include <vm/vm_object.h>
47 #include <vm/vm_page.h>
48 #include <sys/sysctl.h>
50 #include <sys/buf2.h>
51 #include <vm/vm_page2.h>
53 #include <machine/limits.h>
56 * Cluster tracking cache - replaces the original vnode v_* fields which had
57 * limited utility and were not MP safe.
59 * The cluster tracking cache is a simple 4-way set-associative non-chained
60 * cache. It is capable of tracking up to four zones separated by 1MB or
61 * more per vnode.
63 * NOTE: We want this structure to be cache-line friendly so the iterator
64 * is embedded rather than in a separate array.
66 * NOTE: A cluster cache entry can become stale when a vnode is recycled.
67 * For now we treat the values as heuristic but also self-consistent.
68 * i.e. the values cannot be completely random and cannot be SMP unsafe
69 * or the cluster code might end up clustering non-contiguous buffers
70 * at the wrong offsets.
72 struct cluster_cache {
73 struct vnode *vp;
74 u_int locked;
75 off_t v_lastw; /* last write (end) (write cluster) */
76 off_t v_cstart; /* start block (beg) of cluster */
77 off_t v_lasta; /* last allocation (end) */
78 u_int v_clen; /* length of current cluster */
79 u_int iterator;
80 } __cachealign;
82 typedef struct cluster_cache cluster_cache_t;
84 #define CLUSTER_CACHE_SIZE 512
85 #define CLUSTER_CACHE_MASK (CLUSTER_CACHE_SIZE - 1)
87 #define CLUSTER_ZONE ((off_t)(1024 * 1024))
89 cluster_cache_t cluster_array[CLUSTER_CACHE_SIZE];
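/*
 * Geometry implied by the constants above and the set selection done in
 * cluster_getcache(): the 512 entries behave as 128 sets of 4 ways (the
 * set index is masked with ~3), and each way tracks one ~1MB CLUSTER_ZONE
 * of offsets, so a single vnode can keep up to four zones cached at once.
 */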
91 #if defined(CLUSTERDEBUG)
92 #include <sys/sysctl.h>
93 static int rcluster = 0;
94 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
95 #endif
97 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
99 static struct cluster_save *
100 cluster_collectbufs (cluster_cache_t *cc, struct vnode *vp,
101 struct buf *last_bp, int blksize);
102 static struct buf *
103 cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
104 off_t doffset, int blksize, int run,
105 struct buf *fbp, int *srp);
106 static void cluster_callback (struct bio *);
107 static void cluster_setram (struct buf *);
108 static void cluster_clrram (struct buf *);
109 static int cluster_wbuild(struct vnode *vp, struct buf **bpp, int blksize,
110 off_t start_loffset, int bytes);
112 static int write_behind = 1;
113 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
114 "Cluster write-behind setting");
115 static quad_t write_behind_minfilesize = 10 * 1024 * 1024;
116 SYSCTL_QUAD(_vfs, OID_AUTO, write_behind_minfilesize, CTLFLAG_RW,
117 &write_behind_minfilesize, 0, "Cluster write-behind setting");
118 static int max_readahead = 2 * 1024 * 1024;
119 SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
120 "Limit in bytes for desired cluster read-ahead");
122 extern vm_page_t bogus_page;
125 * nblks is our cluster_rbuild request size. The approximate number of
126 * physical read-ahead requests is maxra / nblks. The physical request
127 * size is limited by the device (maxrbuild). We also do not want to make
128 * the request size too big or it will mess up the B_RAM streaming.
130 static __inline
132 calc_rbuild_reqsize(int maxra, int maxrbuild)
134 int nblks;
136 if ((nblks = maxra / 4) > maxrbuild)
137 nblks = maxrbuild;
138 if (nblks < 1)
139 nblks = maxra;
140 return nblks;
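/*
 * Worked example with hypothetical numbers: for maxra = 128 blocks of
 * desired read-ahead and a device limit of maxrbuild = 8 blocks per
 * physical I/O, nblks = min(128 / 4, 8) = 8, i.e. roughly 128 / 8 = 16
 * physical read-ahead requests.  If maxra is smaller than 4, nblks falls
 * back to maxra and the read-ahead is issued as a single request.
 */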
144 * Acquire/release cluster cache (can return dummy entry)
146 static
147 cluster_cache_t *
148 cluster_getcache(cluster_cache_t *dummy, struct vnode *vp, off_t loffset)
150 cluster_cache_t *cc;
151 size_t hv;
152 int i;
153 int xact;
155 hv = (size_t)(intptr_t)vp ^ (size_t)(intptr_t)vp / sizeof(*vp);
156 hv &= CLUSTER_CACHE_MASK & ~3;
157 cc = &cluster_array[hv];
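/*
 * The hash above folds the vnode pointer into an array index and clears
 * the low two bits, so cc points at the first of the four ways making up
 * this vnode's set.  The loop below probes all four ways looking for an
 * entry whose v_cstart lies in the same CLUSTER_ZONE as loffset.
 */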
159 xact = -1;
160 for (i = 0; i < 4; ++i) {
161 if (cc[i].vp != vp)
162 continue;
163 if (((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
164 xact = i;
165 break;
168 if (xact >= 0 && atomic_swap_int(&cc[xact].locked, 1) == 0) {
169 if (cc[xact].vp == vp &&
170 ((cc[i].v_cstart ^ loffset) & ~(CLUSTER_ZONE - 1)) == 0) {
171 return(&cc[xact]);
173 atomic_swap_int(&cc[xact].locked, 0);
177 * New entry. If we can't acquire the cache line then use the
178 * passed-in dummy element and reset all fields.
180 * When we are able to acquire the cache line we only clear the
181 * fields if the vp does not match. This allows us to multi-zone
182 * a vp and for excessive zones / partial clusters to be retired.
184 i = cc->iterator++ & 3;
185 cc += i;
186 if (atomic_swap_int(&cc->locked, 1) != 0) {
187 cc = dummy;
188 cc->locked = 1;
189 cc->vp = NULL;
191 if (cc->vp != vp) {
192 cc->vp = vp;
193 cc->v_lasta = 0;
194 cc->v_clen = 0;
195 cc->v_cstart = 0;
196 cc->v_lastw = 0;
198 return(cc);
201 static
202 void
203 cluster_putcache(cluster_cache_t *cc)
205 atomic_swap_int(&cc->locked, 0);
209 * This replaces bread(), providing a synchronous read of the requested
210 * buffer plus asynchronous read-ahead within the specified bounds.
212 * The caller may pre-populate *bpp if it already has the requested buffer
213 * in-hand, else must set *bpp to NULL. Note that the cluster_read() inline
214 * sets *bpp to NULL and then calls cluster_readx() for compatibility.
216 * filesize - read-ahead @ blksize will not cross this boundary
217 * loffset - loffset for returned *bpp
218 * blksize - blocksize for returned *bpp and read-ahead bps
219 * minreq - minimum (not a hard minimum) in bytes, typically reflects
220 * a higher level uio resid.
221 * maxreq - maximum (sequential heuristic) in bytes (highest typically ~2MB)
222 * bpp - return buffer (*bpp) for (loffset,blksize)
225 cluster_readx(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
226 int bflags, size_t minreq, size_t maxreq,
227 struct buf **bpp)
229 struct buf *bp, *rbp, *reqbp;
230 off_t origoffset;
231 off_t doffset;
232 int error;
233 int i;
234 int maxra;
235 int maxrbuild;
236 int sr;
237 int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;
239 sr = 0;
242 * Calculate the desired read-ahead in blksize'd blocks (maxra).
243 * To do this we calculate maxreq.
245 * maxreq typically starts out as a sequential heuristic. If the
246 * high level uio/resid is bigger (minreq), we pop maxreq up to
247 * minreq. This represents the case where random I/O is being
248 * performed but userland is issuing big read()'s.
250 * Then we limit maxreq to max_readahead to ensure it is a reasonable
251 * value.
253 * Finally we must ensure that (loffset + maxreq) does not cross the
254 * boundary (filesize) for the current blocksize. If we allowed it
255 * to cross we could end up with buffers past the boundary with the
256 * wrong block size (HAMMER large-data areas use mixed block sizes).
257 * minreq is also absolutely limited to filesize.
259 if (maxreq < minreq)
260 maxreq = minreq;
261 /* minreq not used beyond this point */
263 if (maxreq > max_readahead) {
264 maxreq = max_readahead;
265 if (maxreq > 16 * 1024 * 1024)
266 maxreq = 16 * 1024 * 1024;
268 if (maxreq < blksize)
269 maxreq = blksize;
270 if (loffset + maxreq > filesize) {
271 if (loffset > filesize)
272 maxreq = 0;
273 else
274 maxreq = filesize - loffset;
277 maxra = (int)(maxreq / blksize);
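/*
 * For example, with the default max_readahead of 2MB and a hypothetical
 * 16KB blksize, maxra works out to at most 128 blocks of prospective
 * read-ahead.
 */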
280 * Get the requested block.
282 if (*bpp)
283 reqbp = bp = *bpp;
284 else
285 *bpp = reqbp = bp = getblk(vp, loffset, blksize, blkflags, 0);
286 origoffset = loffset;
289 * Calculate the maximum cluster size for a single I/O, used
290 * by cluster_rbuild().
292 maxrbuild = vmaxiosize(vp) / blksize;
295 * If it is in the cache, then check to see if the reads have been
296 * sequential. If they have, then try some read-ahead, otherwise
297 * back-off on prospective read-aheads.
299 if (bp->b_flags & B_CACHE) {
301 * Not sequential, do not do any read-ahead
303 if (maxra <= 1)
304 return 0;
307 * No read-ahead mark, do not do any read-ahead
308 * yet.
310 if ((bp->b_flags & B_RAM) == 0)
311 return 0;
314 * We hit a read-ahead-mark, figure out how much read-ahead
315 * to do (maxra) and where to start (loffset).
317 * Typically the way this works is that B_RAM is set in the
318 * middle of the cluster and triggers an overlapping
319 * read-ahead of 1/2 a cluster more blocks. This ensures
320 * that the cluster read-ahead scales with the read-ahead
321 * count and is thus better-able to absorb the caller's
322 * latency.
324 * Estimate where the next unread block will be by assuming
325 * that the B_RAM's are placed at the half-way point.
327 bp->b_flags &= ~B_RAM;
329 i = maxra / 2;
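/*
 * e.g. with maxra = 32 the probe below starts at loffset + 16 * blksize,
 * assuming the previous pass left its B_RAM mark half-way through the
 * blocks it instantiated.
 */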
330 rbp = findblk(vp, loffset + i * blksize, FINDBLK_TEST);
331 if (rbp == NULL || (rbp->b_flags & B_CACHE) == 0) {
332 while (i) {
333 --i;
334 rbp = findblk(vp, loffset + i * blksize,
335 FINDBLK_TEST);
336 if (rbp) {
337 ++i;
338 break;
341 } else {
342 while (i < maxra) {
343 rbp = findblk(vp, loffset + i * blksize,
344 FINDBLK_TEST);
345 if (rbp == NULL)
346 break;
347 ++i;
352 * We got everything or everything is in the cache, no
353 * point continuing.
355 if (i >= maxra)
356 return 0;
359 * Calculate where to start the read-ahead and how much
360 * to do. Generally speaking we want to read-ahead by
361 * (maxra) when we've found a read-ahead mark. We do
362 * not want to reduce maxra here as it will cause
363 * successive read-ahead I/O's to be smaller and smaller.
365 * However, we have to make sure we don't break the
366 * filesize limitation for the clustered operation.
368 loffset += i * blksize;
369 reqbp = bp = NULL;
371 if (loffset >= filesize)
372 return 0;
373 if (loffset + maxra * blksize > filesize) {
374 maxreq = filesize - loffset;
375 maxra = (int)(maxreq / blksize);
379 * Set RAM on first read-ahead block since we still have
380 * approximately maxra/2 blocks ahead of us that are already
381 * cached or in-progress.
383 sr = 1;
384 } else {
386 * Start block is not valid, we will want to do a
387 * full read-ahead.
389 __debugvar off_t firstread = bp->b_loffset;
390 int nblks;
393 * Set-up synchronous read for bp.
395 bp->b_cmd = BUF_CMD_READ;
396 bp->b_bio1.bio_done = biodone_sync;
397 bp->b_bio1.bio_flags |= BIO_SYNC;
399 KASSERT(firstread != NOOFFSET,
400 ("cluster_read: no buffer offset"));
402 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
405 * Set RAM half-way through the full-cluster.
407 sr = (maxra + 1) / 2;
409 if (nblks > 1) {
410 int burstbytes;
412 error = VOP_BMAP(vp, loffset, &doffset,
413 &burstbytes, NULL, BUF_CMD_READ);
414 if (error)
415 goto single_block_read;
416 if (nblks > burstbytes / blksize)
417 nblks = burstbytes / blksize;
418 if (doffset == NOOFFSET)
419 goto single_block_read;
420 if (nblks <= 1)
421 goto single_block_read;
423 bp = cluster_rbuild(vp, filesize, loffset,
424 doffset, blksize, nblks, bp, &sr);
425 loffset += bp->b_bufsize;
426 maxra -= bp->b_bufsize / blksize;
427 } else {
428 single_block_read:
430 * If it isn't in the cache, then get a chunk from
431 * disk if sequential, otherwise just get the block.
433 loffset += blksize;
434 --maxra;
439 * If B_CACHE was not set issue bp. bp will either be an
440 * asynchronous cluster buf or a synchronous single-buf.
441 * If it is a single buf it will be the same as reqbp.
443 * NOTE: Once an async cluster buf is issued bp becomes invalid.
445 if (bp) {
446 #if defined(CLUSTERDEBUG)
447 if (rcluster)
448 kprintf("S(%012jx,%d,%d)\n",
449 (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
450 #endif
451 if ((bp->b_flags & B_CLUSTER) == 0)
452 vfs_busy_pages(vp, bp);
453 bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
454 bp->b_flags |= bflags;
455 vn_strategy(vp, &bp->b_bio1);
456 /* bp invalid now */
457 bp = NULL;
460 #if defined(CLUSTERDEBUG)
461 if (rcluster)
462 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
463 loffset, blksize, maxra, sr);
464 #endif
467 * If we have been doing sequential I/O, then do some read-ahead.
468 * The code above us should have positioned us at the next likely
469 * offset.
471 * Only mess with buffers which we can immediately lock. HAMMER
472 * will do device-readahead irrespective of what the blocks
473 * represent.
475 * Set B_RAM on the first buffer (the next likely offset needing
476 * read-ahead), under the assumption that there are still
477 * approximately maxra/2 blocks good ahead of us.
479 while (maxra > 0) {
480 int burstbytes;
481 int nblks;
483 rbp = getblk(vp, loffset, blksize,
484 GETBLK_SZMATCH | GETBLK_NOWAIT | GETBLK_KVABIO,
485 0);
486 #if defined(CLUSTERDEBUG)
487 if (rcluster) {
488 kprintf("read-ahead %016jx rbp=%p ",
489 loffset, rbp);
491 #endif
492 if (rbp == NULL)
493 goto no_read_ahead;
494 if ((rbp->b_flags & B_CACHE)) {
495 bqrelse(rbp);
496 goto no_read_ahead;
500 * If BMAP is not supported or has an issue, we still do
501 * (maxra) read-ahead, but we do not try to use rbuild.
503 error = VOP_BMAP(vp, loffset, &doffset,
504 &burstbytes, NULL, BUF_CMD_READ);
505 if (error || doffset == NOOFFSET) {
506 nblks = 1;
507 doffset = NOOFFSET;
508 } else {
509 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
510 if (nblks > burstbytes / blksize)
511 nblks = burstbytes / blksize;
513 rbp->b_cmd = BUF_CMD_READ;
515 if (nblks > 1) {
516 rbp = cluster_rbuild(vp, filesize, loffset,
517 doffset, blksize,
518 nblks, rbp, &sr);
519 } else {
520 rbp->b_bio2.bio_offset = doffset;
521 if (--sr == 0)
522 cluster_setram(rbp);
525 rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
526 rbp->b_flags |= bflags;
528 if ((rbp->b_flags & B_CLUSTER) == 0)
529 vfs_busy_pages(vp, rbp);
530 BUF_KERNPROC(rbp);
531 loffset += rbp->b_bufsize;
532 maxra -= rbp->b_bufsize / blksize;
533 vn_strategy(vp, &rbp->b_bio1);
534 /* rbp invalid now */
538 * Wait for our original buffer to complete its I/O. reqbp will
539 * be NULL if the original buffer was B_CACHE. We are returning
540 * (*bpp) which is the same as reqbp when reqbp != NULL.
542 no_read_ahead:
543 if (reqbp) {
544 KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
545 error = biowait(&reqbp->b_bio1, "clurd");
546 } else {
547 error = 0;
549 return (error);
553 * This replaces breadcb(), providing an asynchronous read of the requested
554 * buffer with a callback, plus an asynchronous read-ahead within the
555 * specified bounds.
557 * The callback must check whether BIO_DONE is set in the bio and issue
558 * the bpdone(bp, 0) if it isn't. The callback is responsible for clearing
559 * BIO_DONE and disposing of the I/O (bqrelse()ing it).
561 * filesize - read-ahead @ blksize will not cross this boundary
562 * loffset - loffset for returned *bpp
563 * blksize - blocksize for returned *bpp and read-ahead bps
564 * minreq - minimum (not a hard minimum) in bytes, typically reflects
565 * a higher level uio resid.
566 * maxreq - maximum (sequential heuristic) in bytes (highest typically ~2MB)
567 * bpp - return buffer (*bpp) for (loffset,blksize)
569 void
570 cluster_readcb(struct vnode *vp, off_t filesize, off_t loffset, int blksize,
571 int bflags, size_t minreq, size_t maxreq,
572 void (*func)(struct bio *), void *arg)
574 struct buf *bp, *rbp, *reqbp;
575 off_t origoffset;
576 off_t doffset;
577 int i;
578 int maxra;
579 int maxrbuild;
580 int sr;
581 int blkflags = (bflags & B_KVABIO) ? GETBLK_KVABIO : 0;
583 sr = 0;
586 * Calculate the desired read-ahead in blksize'd blocks (maxra).
587 * To do this we calculate maxreq.
589 * maxreq typically starts out as a sequential heuristic. If the
590 * high level uio/resid is bigger (minreq), we pop maxreq up to
591 * minreq. This represents the case where random I/O is being
592 * performed but userland is issuing big read()'s.
594 * Then we limit maxreq to max_readahead to ensure it is a reasonable
595 * value.
597 * Finally we must ensure that (loffset + maxreq) does not cross the
598 * boundary (filesize) for the current blocksize. If we allowed it
599 * to cross we could end up with buffers past the boundary with the
600 * wrong block size (HAMMER large-data areas use mixed block sizes).
601 * minreq is also absolutely limited to filesize.
603 if (maxreq < minreq)
604 maxreq = minreq;
605 /* minreq not used beyond this point */
607 if (maxreq > max_readahead) {
608 maxreq = max_readahead;
609 if (maxreq > 16 * 1024 * 1024)
610 maxreq = 16 * 1024 * 1024;
612 if (maxreq < blksize)
613 maxreq = blksize;
614 if (loffset + maxreq > filesize) {
615 if (loffset > filesize)
616 maxreq = 0;
617 else
618 maxreq = filesize - loffset;
621 maxra = (int)(maxreq / blksize);
624 * Get the requested block.
626 reqbp = bp = getblk(vp, loffset, blksize, blkflags, 0);
627 origoffset = loffset;
630 * Calculate the maximum cluster size for a single I/O, used
631 * by cluster_rbuild().
633 maxrbuild = vmaxiosize(vp) / blksize;
636 * If it is in the cache, then check to see if the reads have been
637 * sequential. If they have, then try some read-ahead, otherwise
638 * back-off on prospective read-aheads.
640 if (bp->b_flags & B_CACHE) {
642 * Setup for func() call whether we do read-ahead or not.
644 bp->b_bio1.bio_caller_info1.ptr = arg;
645 bp->b_bio1.bio_flags |= BIO_DONE;
648 * Not sequential, do not do any read-ahead
650 if (maxra <= 1)
651 goto no_read_ahead;
654 * No read-ahead mark, do not do any read-ahead
655 * yet.
657 if ((bp->b_flags & B_RAM) == 0)
658 goto no_read_ahead;
659 bp->b_flags &= ~B_RAM;
662 * We hit a read-ahead-mark, figure out how much read-ahead
663 * to do (maxra) and where to start (loffset).
665 * Shortcut the scan. Typically the way this works is that
666 * we've built up all the blocks in between except for the
667 * last in previous iterations, so if the second-to-last
668 * block is present we just skip ahead to it.
670 * This algorithm has O(1) cpu in the steady state no
671 * matter how large maxra is.
673 if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
674 i = maxra - 1;
675 else
676 i = 1;
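/*
 * e.g. if previous iterations already instantiated blocks 1..maxra-2,
 * the findblk() above hits and the scan below only has to check the
 * final block, keeping the cost constant no matter how large maxra is.
 */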
677 while (i < maxra) {
678 if (findblk(vp, loffset + i * blksize,
679 FINDBLK_TEST) == NULL) {
680 break;
682 ++i;
686 * We got everything or everything is in the cache, no
687 * point continuing.
689 if (i >= maxra)
690 goto no_read_ahead;
693 * Calculate where to start the read-ahead and how much
694 * to do. Generally speaking we want to read-ahead by
695 * (maxra) when we've found a read-ahead mark. We do
696 * not want to reduce maxra here as it will cause
697 * successive read-ahead I/O's to be smaller and smaller.
699 * However, we have to make sure we don't break the
700 * filesize limitation for the clustered operation.
702 loffset += i * blksize;
703 bp = NULL;
704 /* leave reqbp intact to force function callback */
706 if (loffset >= filesize)
707 goto no_read_ahead;
708 if (loffset + maxra * blksize > filesize) {
709 maxreq = filesize - loffset;
710 maxra = (int)(maxreq / blksize);
712 sr = 1;
713 } else {
715 * bp is not valid, no prior cluster in progress so get a
716 * full cluster read-ahead going.
718 __debugvar off_t firstread = bp->b_loffset;
719 int nblks;
720 int error;
723 * Set-up synchronous read for bp.
725 bp->b_flags &= ~(B_ERROR | B_EINTR | B_INVAL | B_NOTMETA);
726 bp->b_flags |= bflags;
727 bp->b_cmd = BUF_CMD_READ;
728 bp->b_bio1.bio_done = func;
729 bp->b_bio1.bio_caller_info1.ptr = arg;
730 BUF_KERNPROC(bp);
731 reqbp = NULL; /* don't func() reqbp, it's running async */
733 KASSERT(firstread != NOOFFSET,
734 ("cluster_read: no buffer offset"));
737 * nblks is our cluster_rbuild request size, limited
738 * primarily by the device.
740 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
743 * Set RAM half-way through the full-cluster.
745 sr = (maxra + 1) / 2;
747 if (nblks > 1) {
748 int burstbytes;
750 error = VOP_BMAP(vp, loffset, &doffset,
751 &burstbytes, NULL, BUF_CMD_READ);
752 if (error)
753 goto single_block_read;
754 if (nblks > burstbytes / blksize)
755 nblks = burstbytes / blksize;
756 if (doffset == NOOFFSET)
757 goto single_block_read;
758 if (nblks <= 1)
759 goto single_block_read;
761 bp = cluster_rbuild(vp, filesize, loffset,
762 doffset, blksize, nblks, bp, &sr);
763 loffset += bp->b_bufsize;
764 maxra -= bp->b_bufsize / blksize;
765 } else {
766 single_block_read:
768 * If it isn't in the cache, then get a chunk from
769 * disk if sequential, otherwise just get the block.
771 loffset += blksize;
772 --maxra;
777 * If bp != NULL then B_CACHE was *NOT* set and bp must be issued.
778 * bp will either be an asynchronous cluster buf or an asynchronous
779 * single-buf.
781 * NOTE: Once an async cluster buf is issued bp becomes invalid.
783 if (bp) {
784 #if defined(CLUSTERDEBUG)
785 if (rcluster)
786 kprintf("S(%012jx,%d,%d)\n",
787 (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
788 #endif
789 if ((bp->b_flags & B_CLUSTER) == 0)
790 vfs_busy_pages(vp, bp);
791 bp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
792 bp->b_flags |= bflags;
793 vn_strategy(vp, &bp->b_bio1);
794 /* bp invalid now */
795 bp = NULL;
798 #if defined(CLUSTERDEBUG)
799 if (rcluster)
800 kprintf("cluster_rd %016jx/%d maxra=%d sr=%d\n",
801 loffset, blksize, maxra, sr);
802 #endif
805 * If we have been doing sequential I/O, then do some read-ahead.
806 * The code above us should have positioned us at the next likely
807 * offset.
809 * Only mess with buffers which we can immediately lock. HAMMER
810 * will do device-readahead irrespective of what the blocks
811 * represent.
813 while (maxra > 0) {
814 int burstbytes;
815 int error;
816 int nblks;
818 rbp = getblk(vp, loffset, blksize,
819 GETBLK_SZMATCH | GETBLK_NOWAIT | GETBLK_KVABIO,
820 0);
821 if (rbp == NULL)
822 goto no_read_ahead;
823 if ((rbp->b_flags & B_CACHE)) {
824 bqrelse(rbp);
825 goto no_read_ahead;
829 * If BMAP is not supported or has an issue, we still do
830 * (maxra) read-ahead, but we do not try to use rbuild.
832 error = VOP_BMAP(vp, loffset, &doffset,
833 &burstbytes, NULL, BUF_CMD_READ);
834 if (error || doffset == NOOFFSET) {
835 nblks = 1;
836 doffset = NOOFFSET;
837 } else {
838 nblks = calc_rbuild_reqsize(maxra, maxrbuild);
839 if (nblks > burstbytes / blksize)
840 nblks = burstbytes / blksize;
842 rbp->b_cmd = BUF_CMD_READ;
844 if (nblks > 1) {
845 rbp = cluster_rbuild(vp, filesize, loffset,
846 doffset, blksize,
847 nblks, rbp, &sr);
848 } else {
849 rbp->b_bio2.bio_offset = doffset;
850 if (--sr == 0)
851 cluster_setram(rbp);
854 rbp->b_flags &= ~(B_ERROR | B_INVAL | B_NOTMETA);
855 rbp->b_flags |= bflags;
857 if ((rbp->b_flags & B_CLUSTER) == 0)
858 vfs_busy_pages(vp, rbp);
859 BUF_KERNPROC(rbp);
860 loffset += rbp->b_bufsize;
861 maxra -= rbp->b_bufsize / blksize;
862 vn_strategy(vp, &rbp->b_bio1);
863 /* rbp invalid now */
867 * If reqbp is non-NULL it had B_CACHE set and we issue the
868 * function callback synchronously.
870 * Note that we may start additional asynchronous I/O before doing
871 * the func() callback for the B_CACHE case.
873 no_read_ahead:
874 if (reqbp)
875 func(&reqbp->b_bio1);
879 * If blocks are contiguous on disk, use this to provide clustered
880 * read ahead. We will read as many blocks as possible sequentially
881 * and then parcel them up into logical blocks in the buffer hash table.
883 * This function either returns a cluster buf or it returns fbp. fbp is
884 * already expected to be set up as a synchronous or asynchronous request.
886 * If a cluster buf is returned it will always be async.
888 * (*srp) counts down original blocks to determine where B_RAM should be set.
889 * Set B_RAM when *srp drops to 0. If (*srp) starts at 0, B_RAM will not be
890 * set on any buffer. Make sure B_RAM is cleared on any other buffers to
891 * prevent degenerate read-aheads from being generated.
893 static struct buf *
894 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
895 int blksize, int run, struct buf *fbp, int *srp)
897 struct buf *bp, *tbp;
898 off_t boffset;
899 int i, j;
900 int maxiosize = vmaxiosize(vp);
903 * avoid a division
905 while (loffset + run * blksize > filesize) {
906 --run;
909 tbp = fbp;
910 tbp->b_bio2.bio_offset = doffset;
911 if (((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
912 if (--*srp == 0)
913 cluster_setram(tbp);
914 else
915 cluster_clrram(tbp);
916 return tbp;
920 * Get a pbuf, limit cluster I/O on a per-device basis. If
921 * doing cluster I/O for a file, limit cluster I/O on a
922 * per-mount basis.
924 if (vp->v_type == VCHR || vp->v_type == VBLK)
925 bp = trypbuf_kva(&vp->v_pbuf_count);
926 else
927 bp = trypbuf_kva(&vp->v_mount->mnt_pbuf_count);
929 if (bp == NULL)
930 return tbp;
933 * We are synthesizing a buffer out of vm_page_t's, but
934 * if the block size is not page aligned then the starting
935 * address may not be either. Inherit the b_data offset
936 * from the original buffer.
938 bp->b_vp = vp;
939 bp->b_data = (char *)((vm_offset_t)bp->b_data |
940 ((vm_offset_t)tbp->b_data & PAGE_MASK));
941 bp->b_flags |= B_CLUSTER | B_VMIO | B_KVABIO;
942 bp->b_cmd = BUF_CMD_READ;
943 bp->b_bio1.bio_done = cluster_callback; /* default to async */
944 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
945 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
946 bp->b_loffset = loffset;
947 bp->b_bio2.bio_offset = doffset;
948 KASSERT(bp->b_loffset != NOOFFSET,
949 ("cluster_rbuild: no buffer offset"));
951 bp->b_bcount = 0;
952 bp->b_bufsize = 0;
953 bp->b_xio.xio_npages = 0;
955 for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
956 if (i) {
957 if ((bp->b_xio.xio_npages * PAGE_SIZE) +
958 round_page(blksize) > maxiosize) {
959 break;
963 * Shortcut some checks and try to avoid buffers that
964 * would block in the lock. The same checks have to
965 * be made again after we officially get the buffer.
967 tbp = getblk(vp, loffset + i * blksize, blksize,
968 GETBLK_SZMATCH |
969 GETBLK_NOWAIT |
970 GETBLK_KVABIO,
971 0);
972 if (tbp == NULL)
973 break;
974 for (j = 0; j < tbp->b_xio.xio_npages; j++) {
975 if (tbp->b_xio.xio_pages[j]->valid)
976 break;
978 if (j != tbp->b_xio.xio_npages) {
979 bqrelse(tbp);
980 break;
984 * Stop scanning if the buffer is fully valid
985 * (marked B_CACHE), or locked (may be doing a
986 * background write), or if the buffer is not
987 * VMIO backed. The clustering code can only deal
988 * with VMIO-backed buffers.
990 if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
991 (tbp->b_flags & B_VMIO) == 0 ||
992 (LIST_FIRST(&tbp->b_dep) != NULL &&
993 buf_checkread(tbp))
995 bqrelse(tbp);
996 break;
1000 * The buffer must be completely invalid in order to
1001 * take part in the cluster. If it is partially valid
1002 * then we stop.
1004 for (j = 0;j < tbp->b_xio.xio_npages; j++) {
1005 if (tbp->b_xio.xio_pages[j]->valid)
1006 break;
1008 if (j != tbp->b_xio.xio_npages) {
1009 bqrelse(tbp);
1010 break;
1014 * Depress the priority of buffers not explicitly
1015 * requested.
1017 /* tbp->b_flags |= B_AGE; */
1020 * Set the block number if it isn't set, otherwise
1021 * if it is make sure it matches the block number we
1022 * expect.
1024 if (tbp->b_bio2.bio_offset == NOOFFSET) {
1025 tbp->b_bio2.bio_offset = boffset;
1026 } else if (tbp->b_bio2.bio_offset != boffset) {
1027 brelse(tbp);
1028 break;
1033 * Set B_RAM if (*srp) is 1. B_RAM is only set on one buffer
1034 * in the cluster, including potentially the first buffer
1035 * once we start streaming the read-aheads.
1037 if (--*srp == 0)
1038 cluster_setram(tbp);
1039 else
1040 cluster_clrram(tbp);
1043 * The passed-in tbp (i == 0) will already be set up for
1044 * async or sync operation. All other tbp's acquired in
1045 * our loop are set up for async operation.
1047 tbp->b_cmd = BUF_CMD_READ;
1048 BUF_KERNPROC(tbp);
1049 cluster_append(&bp->b_bio1, tbp);
1050 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1051 vm_page_t m;
1053 m = tbp->b_xio.xio_pages[j];
1054 vm_page_busy_wait(m, FALSE, "clurpg");
1055 vm_page_io_start(m);
1056 vm_page_wakeup(m);
1057 vm_object_pip_add(m->object, 1);
1058 if ((bp->b_xio.xio_npages == 0) ||
1059 (bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
1060 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1061 bp->b_xio.xio_npages++;
1063 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
1064 tbp->b_xio.xio_pages[j] = bogus_page;
1065 tbp->b_flags |= B_HASBOGUS;
1069 * XXX shouldn't this be += blksize for both, like in
1070 * cluster_wbuild()?
1072 * Don't inherit tbp->b_bufsize as it may be larger due to
1073 * a non-page-aligned size. Instead just aggregate using
1074 * 'blksize'.
1076 if (tbp->b_bcount != blksize)
1077 kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
1078 if (tbp->b_bufsize != blksize)
1079 kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
1080 bp->b_bcount += blksize;
1081 bp->b_bufsize += blksize;
1085 * Fully valid pages in the cluster are already good and do not need
1086 * to be re-read from disk. Replace the page with bogus_page
1088 for (j = 0; j < bp->b_xio.xio_npages; j++) {
1089 if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
1090 VM_PAGE_BITS_ALL) {
1091 bp->b_xio.xio_pages[j] = bogus_page;
1092 bp->b_flags |= B_HASBOGUS;
1095 if (bp->b_bufsize > bp->b_kvasize) {
1096 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
1097 bp->b_bufsize, bp->b_kvasize);
1099 pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
1100 (vm_page_t *)bp->b_xio.xio_pages,
1101 bp->b_xio.xio_npages);
1102 BUF_KERNPROC(bp);
1103 return (bp);
1107 * Cleanup after a clustered read or write.
1108 * This is complicated by the fact that any of the buffers might have
1109 * extra memory (if there were no empty buffer headers at allocbuf time)
1110 * that we will need to shift around.
1112 * The returned bio is &bp->b_bio1
1114 static void
1115 cluster_callback(struct bio *bio)
1117 struct buf *bp = bio->bio_buf;
1118 struct buf *tbp;
1119 struct vnode *vp;
1120 int error = 0;
1123 * Must propagate errors to all the components. A short read (EOF)
1124 * is a critical error.
1126 if (bp->b_flags & B_ERROR) {
1127 error = bp->b_error;
1128 } else if (bp->b_bcount != bp->b_bufsize) {
1129 panic("cluster_callback: unexpected EOF on cluster %p!", bio);
1132 pmap_qremove_noinval(trunc_page((vm_offset_t) bp->b_data),
1133 bp->b_xio.xio_npages);
1135 * Move memory from the large cluster buffer into the component
1136 * buffers and mark IO as done on these. Since the memory map
1137 * is the same, no actual copying is required.
1139 while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
1140 bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
1141 if (error) {
1142 tbp->b_flags |= B_ERROR | B_IOISSUED;
1143 tbp->b_error = error;
1144 } else {
1145 tbp->b_dirtyoff = tbp->b_dirtyend = 0;
1146 tbp->b_flags &= ~(B_ERROR | B_INVAL);
1147 if (tbp->b_cmd == BUF_CMD_READ) {
1148 tbp->b_flags = (tbp->b_flags & ~B_NOTMETA) |
1149 (bp->b_flags & B_NOTMETA);
1151 tbp->b_flags |= B_IOISSUED;
1153 * XXX the bdwrite()/bqrelse() issued during
1154 * cluster building clears B_RELBUF (see bqrelse()
1155 * comment). If direct I/O was specified, we have
1156 * to restore it here to allow the buffer and VM
1157 * to be freed.
1159 if (tbp->b_flags & B_DIRECT)
1160 tbp->b_flags |= B_RELBUF;
1163 * XXX I think biodone() below will do this, but do
1164 * it here anyway for consistency.
1166 if (tbp->b_cmd == BUF_CMD_WRITE)
1167 bundirty(tbp);
1169 biodone(&tbp->b_bio1);
1171 vp = bp->b_vp;
1172 bp->b_vp = NULL;
1173 if (vp->v_type == VCHR || vp->v_type == VBLK)
1174 relpbuf(bp, &vp->v_pbuf_count);
1175 else
1176 relpbuf(bp, &vp->v_mount->mnt_pbuf_count);
1180 * Implement modified write build for cluster.
1182 * write_behind = 0 write behind disabled
1183 * write_behind = 1 write behind normal (default)
1184 * write_behind = 2 write behind backed-off
1186 * In addition, write_behind is only activated for files that have
1187 * grown past a certain size (default 10MB). Otherwise temporary files
1188 * wind up generating a lot of unnecessary disk I/O.
1190 static __inline int
1191 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
1193 int r = 0;
1195 switch(write_behind) {
1196 case 2:
1197 if (start_loffset < len)
1198 break;
1199 start_loffset -= len;
1200 /* fall through */
1201 case 1:
1202 if (vp->v_filesize >= write_behind_minfilesize) {
1203 r = cluster_wbuild(vp, NULL, blksize,
1204 start_loffset, len);
1206 /* fall through */
1207 default:
1208 /* fall through */
1209 break;
1211 return(r);
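/*
 * Illustration of the write_behind modes handled above: mode 1 flushes
 * [start_loffset, start_loffset + len) once the file has grown past
 * write_behind_minfilesize; mode 2 first backs start_loffset off by len,
 * so the flush trails one window behind the cluster that just completed;
 * mode 0 (or any other value) falls through to the default case and does
 * nothing.
 */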
1215 * Do clustered write for FFS.
1217 * Four cases:
1218 * 1. Write is not sequential (write asynchronously)
1219 * Write is sequential:
1220 * 2. beginning of cluster - begin cluster
1221 * 3. middle of a cluster - add to cluster
1222 * 4. end of a cluster - asynchronously write cluster
1224 * WARNING! vnode fields are not locked and must ONLY be used heuristically.
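/*
 * In terms of the cluster cache fields: a write at loffset is considered
 * sequential when it lands exactly at the previous v_lastw (and, when the
 * physical offset is known, at v_lasta).  v_cstart/v_clen describe the
 * cluster currently being accumulated; a write arriving at
 * v_cstart + v_clen completes the cluster (case 4 above), which is then
 * pushed out if seqcount indicates sequential behavior.
 */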
1226 void
1227 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
1229 struct vnode *vp;
1230 off_t loffset;
1231 int maxclen, cursize;
1232 int async;
1233 cluster_cache_t dummy;
1234 cluster_cache_t *cc;
1236 vp = bp->b_vp;
1237 if (vp->v_type == VREG)
1238 async = vp->v_mount->mnt_flag & MNT_ASYNC;
1239 else
1240 async = 0;
1241 loffset = bp->b_loffset;
1242 KASSERT(bp->b_loffset != NOOFFSET,
1243 ("cluster_write: no buffer offset"));
1245 cc = cluster_getcache(&dummy, vp, loffset);
1248 * Initialize vnode to beginning of file.
1250 if (loffset == 0)
1251 cc->v_lasta = cc->v_clen = cc->v_cstart = cc->v_lastw = 0;
1253 if (cc->v_clen == 0 || loffset != cc->v_lastw ||
1254 (bp->b_bio2.bio_offset != NOOFFSET &&
1255 (bp->b_bio2.bio_offset != cc->v_lasta))) {
1257 * Next block is not logically sequential, or, if physical
1258 * block offsets are available, not physically sequential.
1260 * If physical block offsets are not available we only
1261 * get here if we weren't logically sequential.
1263 maxclen = vmaxiosize(vp);
1264 if (cc->v_clen != 0) {
1266 * Next block is not sequential.
1268 * If we are not writing at end of file, the process
1269 * seeked to another point in the file since its last
1270 * write, or we have reached our maximum cluster size,
1271 * then push the previous cluster. Otherwise try
1272 * reallocating to make it sequential.
1274 * Change to algorithm: only push previous cluster if
1275 * it was sequential from the point of view of the
1276 * seqcount heuristic, otherwise leave the buffer
1277 * intact so we can potentially optimize the I/O
1278 * later on in the buf_daemon or update daemon
1279 * flush.
1281 cursize = cc->v_lastw - cc->v_cstart;
1282 if (bp->b_loffset + blksize < filesize ||
1283 loffset != cc->v_lastw ||
1284 cc->v_clen <= cursize) {
1285 if (!async && seqcount > 0) {
1286 cluster_wbuild_wb(vp, blksize,
1287 cc->v_cstart, cursize);
1289 } else {
1290 struct buf **bpp, **endbp;
1291 struct cluster_save *buflist;
1293 buflist = cluster_collectbufs(cc, vp,
1294 bp, blksize);
1295 endbp = &buflist->bs_children
1296 [buflist->bs_nchildren - 1];
1297 if (VOP_REALLOCBLKS(vp, buflist)) {
1299 * Failed, push the previous cluster
1300 * if *really* writing sequentially
1301 * in the logical file (seqcount > 1),
1302 * otherwise delay it in the hopes that
1303 * the low level disk driver can
1304 * optimize the write ordering.
1306 * NOTE: We do not brelse the last
1307 * element which is bp, and we
1308 * do not return here.
1310 for (bpp = buflist->bs_children;
1311 bpp < endbp; bpp++)
1312 brelse(*bpp);
1313 kfree(buflist, M_SEGMENT);
1314 if (seqcount > 1) {
1315 cluster_wbuild_wb(vp,
1316 blksize, cc->v_cstart,
1317 cursize);
1319 } else {
1321 * Succeeded, keep building cluster.
1323 for (bpp = buflist->bs_children;
1324 bpp <= endbp; bpp++)
1325 bdwrite(*bpp);
1326 kfree(buflist, M_SEGMENT);
1327 cc->v_lastw = loffset + blksize;
1328 cc->v_lasta = bp->b_bio2.bio_offset +
1329 blksize;
1330 cluster_putcache(cc);
1331 return;
1337 * Consider beginning a cluster. If at end of file, make
1338 * cluster as large as possible, otherwise find size of
1339 * existing cluster.
1341 if ((vp->v_type == VREG) &&
1342 bp->b_loffset + blksize < filesize &&
1343 (bp->b_bio2.bio_offset == NOOFFSET) &&
1344 (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
1345 bp->b_bio2.bio_offset == NOOFFSET)) {
1346 bdwrite(bp);
1347 cc->v_clen = 0;
1348 cc->v_lasta = bp->b_bio2.bio_offset + blksize;
1349 cc->v_cstart = loffset;
1350 cc->v_lastw = loffset + blksize;
1351 cluster_putcache(cc);
1352 return;
1354 if (maxclen > blksize)
1355 cc->v_clen = maxclen;
1356 else
1357 cc->v_clen = blksize;
1358 if (!async && cc->v_clen == 0) { /* I/O not contiguous */
1359 cc->v_cstart = loffset;
1360 bdwrite(bp);
1361 } else { /* Wait for rest of cluster */
1362 cc->v_cstart = loffset;
1363 bdwrite(bp);
1365 } else if (loffset == cc->v_cstart + cc->v_clen) {
1367 * At end of cluster, write it out if seqcount tells us we
1368 * are operating sequentially, otherwise let the buf or
1369 * update daemon handle it.
1371 bdwrite(bp);
1372 if (seqcount > 1)
1373 cluster_wbuild_wb(vp, blksize, cc->v_cstart,
1374 cc->v_clen + blksize);
1375 cc->v_clen = 0;
1376 cc->v_cstart = loffset;
1377 } else if (vm_page_count_severe() &&
1378 bp->b_loffset + blksize < filesize) {
1380 * We are low on memory, get it going NOW. However, do not
1381 * try to push out a partial block at the end of the file
1382 * as this could lead to extremely non-optimal write activity.
1384 bawrite(bp);
1385 } else {
1387 * In the middle of a cluster, so just delay the I/O for now.
1389 bdwrite(bp);
1391 cc->v_lastw = loffset + blksize;
1392 cc->v_lasta = bp->b_bio2.bio_offset + blksize;
1393 cluster_putcache(cc);
1397 * This is the clustered version of bawrite(). It works similarly to
1398 * cluster_write() except I/O on the buffer is guaranteed to occur.
1401 cluster_awrite(struct buf *bp)
1403 int total;
1406 * Don't bother if it isn't clusterable.
1408 if ((bp->b_flags & B_CLUSTEROK) == 0 ||
1409 bp->b_vp == NULL ||
1410 (bp->b_vp->v_flag & VOBJBUF) == 0) {
1411 total = bp->b_bufsize;
1412 bawrite(bp);
1413 return (total);
1416 total = cluster_wbuild(bp->b_vp, &bp, bp->b_bufsize,
1417 bp->b_loffset, vmaxiosize(bp->b_vp));
1420 * If bp is still non-NULL then cluster_wbuild() did not initiate
1421 * I/O on it and we must do so here to provide the API guarantee.
1423 if (bp)
1424 bawrite(bp);
1426 return total;
1430 * This is an awful lot like cluster_rbuild...wish they could be combined.
1431 * The last lbn argument is the current block on which I/O is being
1432 * performed. Check to see that it doesn't fall in the middle of
1433 * the current block (if last_bp == NULL).
1435 * cluster_wbuild() normally does not guarantee anything. If bpp is
1436 * non-NULL and cluster_wbuild() is able to incorporate it into the
1437 * I/O it will set *bpp to NULL, otherwise it will leave it alone and
1438 * the caller must dispose of *bpp.
1440 static int
1441 cluster_wbuild(struct vnode *vp, struct buf **bpp,
1442 int blksize, off_t start_loffset, int bytes)
1444 struct buf *bp, *tbp;
1445 int i, j;
1446 int totalwritten = 0;
1447 int must_initiate;
1448 int maxiosize = vmaxiosize(vp);
1450 while (bytes > 0) {
1452 * If the buffer matches the passed locked & removed buffer
1453 * we use the passed buffer (which might not be B_DELWRI).
1455 * Otherwise locate the buffer and determine if it is
1456 * compatible.
1458 if (bpp && (*bpp)->b_loffset == start_loffset) {
1459 tbp = *bpp;
1460 *bpp = NULL;
1461 bpp = NULL;
1462 } else {
1463 tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK |
1464 FINDBLK_KVABIO);
1465 if (tbp == NULL ||
1466 (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
1467 B_DELWRI ||
1468 (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
1469 if (tbp)
1470 BUF_UNLOCK(tbp);
1471 start_loffset += blksize;
1472 bytes -= blksize;
1473 continue;
1475 bremfree(tbp);
1477 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1480 * Extra memory in the buffer, punt on this buffer.
1481 * XXX we could handle this in most cases, but we would
1482 * have to push the extra memory down to after our max
1483 * possible cluster size and then potentially pull it back
1484 * up if the cluster was terminated prematurely--too much
1485 * hassle.
1487 if ((tbp->b_flags & B_CLUSTEROK) == 0 ||
1488 tbp->b_bcount != tbp->b_bufsize ||
1489 tbp->b_bcount != blksize ||
1490 bytes == blksize) {
1491 totalwritten += tbp->b_bufsize;
1492 bawrite(tbp);
1493 start_loffset += blksize;
1494 bytes -= blksize;
1495 continue;
1499 * Get a pbuf, limit cluster I/O on a per-device basis. If
1500 * doing cluster I/O for a file, limit cluster I/O on a
1501 * per-mount basis.
1503 * HAMMER and other filesystems may attempt to queue a massive
1504 * amount of write I/O, using trypbuf() here easily results in
1505 * situation where the I/O stream becomes non-clustered.
1507 if (vp->v_type == VCHR || vp->v_type == VBLK)
1508 bp = getpbuf_kva(&vp->v_pbuf_count);
1509 else
1510 bp = getpbuf_kva(&vp->v_mount->mnt_pbuf_count);
1513 * Set up the pbuf. Track our append point with b_bcount
1514 * and b_bufsize. b_bufsize is not used by the device but
1515 * our caller uses it to loop clusters and we use it to
1516 * detect a premature EOF on the block device.
1518 bp->b_bcount = 0;
1519 bp->b_bufsize = 0;
1520 bp->b_xio.xio_npages = 0;
1521 bp->b_loffset = tbp->b_loffset;
1522 bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
1523 bp->b_vp = vp;
1526 * We are synthesizing a buffer out of vm_page_t's, but
1527 * if the block size is not page aligned then the starting
1528 * address may not be either. Inherit the b_data offset
1529 * from the original buffer.
1531 bp->b_data = (char *)((vm_offset_t)bp->b_data |
1532 ((vm_offset_t)tbp->b_data & PAGE_MASK));
1533 bp->b_flags &= ~(B_ERROR | B_NOTMETA);
1534 bp->b_flags |= B_CLUSTER | B_BNOCLIP | B_KVABIO |
1535 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT |
1536 B_NOTMETA));
1537 bp->b_bio1.bio_caller_info1.cluster_head = NULL;
1538 bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
1541 * From this location in the file, scan forward to see
1542 * if there are buffers with adjacent data that need to
1543 * be written as well.
1545 * IO *must* be initiated on index 0 at this point
1546 * (particularly when called from cluster_awrite()).
1548 for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
1549 if (i == 0) {
1550 must_initiate = 1;
1551 } else {
1553 * Not first buffer.
1555 must_initiate = 0;
1556 tbp = findblk(vp, start_loffset,
1557 FINDBLK_NBLOCK | FINDBLK_KVABIO);
1559 * Buffer not found or could not be locked
1560 * non-blocking.
1562 if (tbp == NULL)
1563 break;
1566 * If it IS in core, but has different
1567 * characteristics, then don't cluster
1568 * with it.
1570 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
1571 B_INVAL | B_DELWRI | B_NEEDCOMMIT))
1572 != (B_DELWRI | B_CLUSTEROK |
1573 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
1574 (tbp->b_flags & B_LOCKED)
1576 BUF_UNLOCK(tbp);
1577 break;
1581 * Check that the combined cluster
1582 * would make sense with regard to pages
1583 * and would not be too large
1585 * WARNING! buf_checkwrite() must be the last
1586 * check made. If it returns 0 then
1587 * we must initiate the I/O.
1589 if ((tbp->b_bcount != blksize) ||
1590 ((bp->b_bio2.bio_offset + i) !=
1591 tbp->b_bio2.bio_offset) ||
1592 ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
1593 (maxiosize / PAGE_SIZE)) ||
1594 (LIST_FIRST(&tbp->b_dep) &&
1595 buf_checkwrite(tbp))
1597 BUF_UNLOCK(tbp);
1598 break;
1600 if (LIST_FIRST(&tbp->b_dep))
1601 must_initiate = 1;
1603 * Ok, it's passed all the tests,
1604 * so remove it from the free list
1605 * and mark it busy. We will use it.
1607 bremfree(tbp);
1608 KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
1612 * If the IO is via the VM then we do some
1613 * special VM hackery (yuck). Since the buffer's
1614 * block size may not be page-aligned it is possible
1615 * for a page to be shared between two buffers. We
1616 * have to get rid of the duplication when building
1617 * the cluster.
1619 if (tbp->b_flags & B_VMIO) {
1620 vm_page_t m;
1623 * Try to avoid deadlocks with the VM system.
1624 * However, we cannot abort the I/O if
1625 * must_initiate is non-zero.
1627 if (must_initiate == 0) {
1628 for (j = 0;
1629 j < tbp->b_xio.xio_npages;
1630 ++j) {
1631 m = tbp->b_xio.xio_pages[j];
1632 if (m->busy_count &
1633 PBUSY_LOCKED) {
1634 bqrelse(tbp);
1635 goto finishcluster;
1640 for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
1641 m = tbp->b_xio.xio_pages[j];
1642 vm_page_busy_wait(m, FALSE, "clurpg");
1643 vm_page_io_start(m);
1644 vm_page_wakeup(m);
1645 vm_object_pip_add(m->object, 1);
1646 if ((bp->b_xio.xio_npages == 0) ||
1647 (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
1648 bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
1649 bp->b_xio.xio_npages++;
1653 bp->b_bcount += blksize;
1654 bp->b_bufsize += blksize;
1657 * NOTE: see bwrite/bawrite code for why we no longer
1658 * undirty tbp here.
1660 * bundirty(tbp); REMOVED
1662 tbp->b_flags &= ~B_ERROR;
1663 tbp->b_cmd = BUF_CMD_WRITE;
1664 BUF_KERNPROC(tbp);
1665 cluster_append(&bp->b_bio1, tbp);
1668 * check for latent dependencies to be handled
1670 if (LIST_FIRST(&tbp->b_dep) != NULL)
1671 buf_start(tbp);
1673 finishcluster:
1674 pmap_qenter_noinval(trunc_page((vm_offset_t)bp->b_data),
1675 (vm_page_t *)bp->b_xio.xio_pages,
1676 bp->b_xio.xio_npages);
1677 if (bp->b_bufsize > bp->b_kvasize) {
1678 panic("cluster_wbuild: b_bufsize(%d) "
1679 "> b_kvasize(%d)\n",
1680 bp->b_bufsize, bp->b_kvasize);
1682 totalwritten += bp->b_bufsize;
1683 bp->b_dirtyoff = 0;
1684 bp->b_dirtyend = bp->b_bufsize;
1685 bp->b_bio1.bio_done = cluster_callback;
1686 bp->b_cmd = BUF_CMD_WRITE;
1688 vfs_busy_pages(vp, bp);
1689 bsetrunningbufspace(bp, bp->b_bufsize);
1690 BUF_KERNPROC(bp);
1691 vn_strategy(vp, &bp->b_bio1);
1693 bytes -= i;
1695 return totalwritten;
1699 * Collect together all the buffers in a cluster, plus add one
1700 * additional buffer passed-in.
1702 * Only pre-existing buffers whose block size matches blksize are collected.
1703 * (this is primarily because HAMMER1 uses varying block sizes and we don't
1704 * want to override its choices).
1706 * This code will not try to collect buffers that it cannot lock, otherwise
1707 * it might deadlock against SMP-friendly filesystems.
1709 static struct cluster_save *
1710 cluster_collectbufs(cluster_cache_t *cc, struct vnode *vp,
1711 struct buf *last_bp, int blksize)
1713 struct cluster_save *buflist;
1714 struct buf *bp;
1715 off_t loffset;
1716 int i, len;
1717 int j;
1718 int k;
1720 len = (int)(cc->v_lastw - cc->v_cstart) / blksize;
1721 KKASSERT(len > 0);
1722 buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1723 M_SEGMENT, M_WAITOK);
1724 buflist->bs_nchildren = 0;
1725 buflist->bs_children = (struct buf **) (buflist + 1);
1726 for (loffset = cc->v_cstart, i = 0, j = 0;
1727 i < len;
1728 (loffset += blksize), i++) {
1729 bp = getcacheblk(vp, loffset,
1730 last_bp->b_bcount, GETBLK_SZMATCH |
1731 GETBLK_NOWAIT);
1732 buflist->bs_children[i] = bp;
1733 if (bp == NULL) {
1734 j = i + 1;
1735 } else if (bp->b_bio2.bio_offset == NOOFFSET) {
1736 VOP_BMAP(bp->b_vp, bp->b_loffset,
1737 &bp->b_bio2.bio_offset,
1738 NULL, NULL, BUF_CMD_WRITE);
1743 * Get rid of gaps
1745 for (k = 0; k < j; ++k) {
1746 if (buflist->bs_children[k]) {
1747 bqrelse(buflist->bs_children[k]);
1748 buflist->bs_children[k] = NULL;
1751 if (j != 0) {
1752 if (j != i) {
1753 bcopy(buflist->bs_children + j,
1754 buflist->bs_children + 0,
1755 sizeof(buflist->bs_children[0]) * (i - j));
1757 i -= j;
1759 buflist->bs_children[i] = bp = last_bp;
1760 if (bp->b_bio2.bio_offset == NOOFFSET) {
1761 VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
1762 NULL, NULL, BUF_CMD_WRITE);
1764 buflist->bs_nchildren = i + 1;
1765 return (buflist);
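/*
 * Append tbp to the cluster list hanging off the pbuf's bio.  The list is
 * singly-linked via b_cluster_next with head and tail kept in the bio's
 * caller_info fields, allowing cluster_callback() to walk the component
 * buffers when the clustered I/O completes.
 */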
1768 void
1769 cluster_append(struct bio *bio, struct buf *tbp)
1771 tbp->b_cluster_next = NULL;
1772 if (bio->bio_caller_info1.cluster_head == NULL) {
1773 bio->bio_caller_info1.cluster_head = tbp;
1774 bio->bio_caller_info2.cluster_tail = tbp;
1775 } else {
1776 bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1777 bio->bio_caller_info2.cluster_tail = tbp;
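/*
 * Set or clear the read-ahead mark on a buffer.  B_RAM on the buf, and
 * PG_RAM on its first page, mark the point at which the read path above
 * should trigger the next round of read-ahead.
 */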
1781 static
1782 void
1783 cluster_setram(struct buf *bp)
1785 bp->b_flags |= B_RAM;
1786 if (bp->b_xio.xio_npages)
1787 vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
1790 static
1791 void
1792 cluster_clrram(struct buf *bp)
1794 bp->b_flags &= ~B_RAM;
1795 if (bp->b_xio.xio_npages)
1796 vm_page_flag_clear(bp->b_xio.xio_pages[0], PG_RAM);