/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>
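
/*
 * State shared with the rest of the NFS client: the externs below are
 * defined in other nfsclient files; ncl_pbuf_freecnt is this file's
 * counter of free physical buffers for getpbuf()/relpbuf().
 */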
extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */
static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);
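
/*
 * Note on the file-local helpers declared above: nfs_getcacheblk() can
 * return NULL if a signal interrupts an interruptible mount, so callers
 * must check for that, and nfs_directio_write() implements the IO_DIRECT
 * write path.
 */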
/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for NFS, where short read can occur???
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0 && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	return (VM_PAGER_OK);
}
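
/*
 * ncl_putpages() below maps the dirty pages handed down by the VM
 * system into a pbuf and pushes them to the server with a single
 * write RPC, using NFSWRITE_UNSTABLE unless the pager requested a
 * synchronous put (VM_PAGER_PUT_SYNC).
 */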
/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return (rtvals[0]);
}
/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}
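
/*
 * The read path below first revalidates the cache via
 * nfs_bioread_check_cons() and then satisfies the uio from cache
 * blocks, issuing read RPCs and readaheads as needed for each
 * vnode type (VREG, VLNK, VDIR).
 */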
/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/ no readaheads. Just read data into the user buffer */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		NFSINCRGLOBAL(newnfsstats.biocache_reads);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
			return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = newnfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuch!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = ncl_doio(vp, bp, cred, td, 0);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    };

	    if (n > 0) {
		    error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(vp, uiop, cred, ioflag)
	struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
	int ioflag;
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
				("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data. Since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}
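
/*
 * The buffered write path below handles the append and file extension
 * cases, merges the new data into the buffer's dirty region, and then
 * either writes synchronously (IO_SYNC), pushes full blocks
 * asynchronously, or delays the write (bdwrite) for partial blocks.
 */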
/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1, wouldcommit;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	wouldcommit = 0;
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nflag & NMODIFIED) {
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
		}
	}

	do {
		if (!(ioflag & IO_SYNC)) {
			wouldcommit += biosize;
			if (wouldcommit > nmp->nm_wcommitsize) {
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
				error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					return (error);
				wouldcommit = biosize;
			}
		}

		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content then what
			 * possibly was written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}
/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0, 0);

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}
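
/*
 * ncl_vinvalbuf() below upgrades to the exclusive vnode lock before
 * flushing, and for pNFS mounts also issues a layout commit and
 * invalidates the attribute cache, since writes to a DS do not
 * update the size attribute.
 */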
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	int old_lock = 0;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
		intrflg = 1;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 0;
	} else {
		slpflag = 0;
		slptimeo = 2 * hz;
	}

	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		/*
		 * Since vgonel() uses the generic vinvalbuf() to flush
		 * dirty buffers and it does not call this function, it
		 * is safe to just return OK when VI_DOOMED is set.
		 */
		ncl_downgrade_vnlock(vp, old_lock);
		return (0);
	}

	/*
	 * Now, flush as required.
	 */
	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	if (NFSHASPNFS(nmp)) {
		nfscl_layoutcommit(vp, td);
		/*
		 * Invalidate the attribute cache, since writes to a DS
		 * won't update the size attribute.
		 */
		mtx_lock(&np->n_mtx);
		np->n_attrstamp = 0;
	} else
		mtx_lock(&np->n_mtx);
	if (np->n_directio_asyncwr == 0)
		np->n_flag &= ~NMODIFIED;
	mtx_unlock(&np->n_mtx);
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return (error);
}
/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so lets save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 *
	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
	 * in the directory in order to update attributes. This can deadlock
	 * with another thread that is waiting for async I/O to be done by
	 * an nfsiod thread while holding a lock on one of these vnodes.
	 * To avoid this deadlock, don't allow the async nfsiod threads to
	 * perform Readdirplus RPCs.
	 */
	mtx_lock(&ncl_iod_mutex);
	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	     (nmp->nm_bufqiods > ncl_numasync / 2)) ||
	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
		mtx_unlock(&ncl_iod_mutex);
		return(EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
		("ncl_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
			NFS_DPF(ASYNCIO,
		("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			   slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					mtx_unlock(&ncl_iod_mutex);
					return (error2);
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
			VTONFS(bp->b_vp)->n_directio_asyncwr++;
			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
		}
		mtx_unlock(&ncl_iod_mutex);
		return (0);
	}

	mtx_unlock(&ncl_iod_mutex);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}
int
ncl_doio_directwrite(struct buf *bp)
{
	int iomode, must_commit;
	struct uio *uiop = (struct uio *)bp->b_caller1;
	char *iov_base = uiop->uio_iov->iov_base;

	iomode = NFSWRITE_FILESYNC;
	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
	free(iov_base, M_NFSDIRECTIO);
	free(uiop->uio_iov, M_NFSDIRECTIO);
	free(uiop, M_NFSDIRECTIO);
	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
		struct nfsnode *np = VTONFS(bp->b_vp);
		mtx_lock(&np->n_mtx);
		if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
			/*
			 * Invalidate the attribute cache, since writes to a DS
			 * won't update the size attribute.
			 */
			np->n_attrstamp = 0;
		}
		np->n_directio_asyncwr--;
		if (np->n_directio_asyncwr == 0) {
			np->n_flag &= ~NMODIFIED;
			if ((np->n_flag & NFSYNCWAIT)) {
				np->n_flag &= ~NFSYNCWAIT;
				wakeup((caddr_t)&np->n_directio_asyncwr);
			}
		}
		mtx_unlock(&np->n_mtx);
	}
	bp->b_vp = NULL;
	relpbuf(bp, &ncl_pbuf_freecnt);
	return (0);
}
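
/*
 * For writes, ncl_doio() below first tries a commit RPC when the
 * buffer is marked B_NEEDCOMMIT; only if that fails (e.g. a stale
 * write verifier) does it fall back to re-sending the dirty data.
 */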
/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t	iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;

	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		NFSINCRGLOBAL(newnfsstats.read_bios);
		error = ncl_readrpc(vp, uiop, cr);

		if (!error) {
		    if (uiop->uio_resid) {
			/*
			 * If we had a short read with no error, we must have
			 * hit a file hole.  We should zero-fill the remainder.
			 * This can also occur if the server hits the file EOF.
			 *
			 * Holes used to be able to occur due to pending
			 * writes, but that is not possible any longer.
			 */
			int nread = bp->b_bcount - uiop->uio_resid;
			ssize_t left = uiop->uio_resid;

			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
			uiop->uio_resid = 0;
		    }
		}
		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
		if (p && (vp->v_vflag & VV_TEXT)) {
			mtx_lock(&np->n_mtx);
			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
				mtx_unlock(&np->n_mtx);
				PROC_LOCK(p);
				killproc(p, "text file modification");
				PROC_UNLOCK(p);
			} else
				mtx_unlock(&np->n_mtx);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		NFSINCRGLOBAL(newnfsstats.readlink_bios);
		error = ncl_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.readdir_bios);
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
			error = ncl_readdirplusrpc(vp, uiop, cr, td);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = ncl_readdirrpc(vp, uiop, cr, td);
		/*
		 * end-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
			bp->b_wcred, td);
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    bufdone(bp);
			    return (0);
		    }
		    if (retv == NFSERR_STALEWRITEVERF) {
			    ncl_clearcommit(vp->v_mount);
		    }
	    }

	    /*
	     * Setup for actual write
	     */
	    mtx_lock(&np->n_mtx);
	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
	    mtx_unlock(&np->n_mtx);

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		NFSINCRGLOBAL(newnfsstats.write_bios);

		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSWRITE_UNSTABLE;
		else
		    iomode = NFSWRITE_FILESYNC;

		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
		    called_from_strategy);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), so we optimize the
		 * append-to-file-case.
		 *
		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write).
		 */

		if (!error && iomode == NFSWRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR and report the interruption
		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * EIO is returned by ncl_writerpc() to indicate a recoverable
		 * write error and is handled as above, except that
		 * B_EINTR isn't set. One cause of this is a stale stateid
		 * error for the RPC that indicates recovery is required,
		 * when called with called_from_strategy != 0.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 *
		 * The logic below breaks up errors into recoverable and
		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
		 * and keep the buffer around for potential write retries.
		 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
		 * and save the error in the nfsnode. This is less than ideal
		 * but necessary. Keeping such buffers around could potentially
		 * cause buffer exhaustion eventually (they can never be written
		 * out, so will get constantly be re-dirtied). It also causes
		 * all sorts of vfs panics. For non-recoverable write errors,
		 * also invalidate the attrcache, so we'll be forced to go over
		 * the wire for this object, returning an error to user on next
		 * call (most of the time).
		 */
		if (error == EINTR || error == EIO || error == ETIMEDOUT
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if ((error == EINTR || error == ETIMEDOUT) &&
			    (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
		} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_flags |= B_INVAL;
			bp->b_error = np->n_error = error;
			mtx_lock(&np->n_mtx);
			np->n_flag |= NWRITEERR;
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			mtx_unlock(&np->n_mtx);
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		bufdone(bp);
		return (0);
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}
/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */
int
ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	mtx_lock(&np->n_mtx);
	tsize = np->n_size;
	np->n_size = nsize;
	mtx_unlock(&np->n_mtx);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, cred, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize - (lbn * biosize);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return (EINTR);
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else
		vnode_pager_setsize(vp, nsize);
	return (error);
}