/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: /repoman/r/ncvs/src/sys/nfsclient/nfs_bio.c,v 1.130 2004/04/14 23:23:55 peadar Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_bio.c,v 1.45 2008/07/18 00:09:39 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/msfbuf.h>

#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#include "nfsm_subs.h"
static struct buf *nfs_getcacheblk(struct vnode *vp, off_t loffset,
				   int size, struct thread *td);
static int nfs_check_dirent(struct nfs_dirent *dp, int maxlen);
static void nfsiodone_sync(struct bio *bio);
static void nfs_readrpc_bio_done(nfsm_info_t info);
static void nfs_writerpc_bio_done(nfsm_info_t info);
static void nfs_commitrpc_bio_done(nfsm_info_t info);
/*
 * Vnode op for VM getpages.
 *
 * nfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		int a_reqpage, vm_ooffset_t a_offset)
 */
int
nfs_getpages(struct vop_getpages_args *ap)
{
	struct thread *td = curthread;		/* XXX */
	int i, error, nextoff, size, toff, count, npages;

	nmp = VFSTONFS(vp->v_mount);

	if (vp->v_object == NULL) {
		kprintf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	npages = btoc(count);

	/*
	 * NOTE that partially valid pages may occur in cases other
	 * than file EOF, such as when a file is partially written and
	 * ftruncate()-extended to a larger size.  It is also possible
	 * for the valid bits to be set on garbage beyond the file EOF and
	 * clear in the area before EOF (e.g. m->valid == 0xfc), which can
	 * occur due to vtruncbuf() and the buffer cache's handling of
	 * pages which 'straddle' buffers or when b_bufsize is not a
	 * multiple of PAGE_SIZE.... the buffer cache cannot normally
	 * clear the extra bits.  This kind of situation occurs when you
	 * make a small write() (m->valid == 0x03) and then mmap() and
	 * fault in the buffer (m->valid == 0xFF).  When NFS flushes the
	 * buffer (vinvalbuf() m->valid == 0xFC) we are left with a mess.
	 *
	 * This is combined with the possibility that the pages are partially
	 * dirty or that there is a buffer backing the pages that is dirty
	 * (even if m->dirty is 0).
	 *
	 * To solve this problem several hacks have been made:  (1) NFS
	 * guarantees that the IO block size is a multiple of PAGE_SIZE and
	 * (2) the buffer cache, when invalidating an NFS buffer, will
	 * disregard the buffer's fragmentary b_bufsize and invalidate
	 * the whole page rather than just the piece the buffer owns.
	 *
	 * This allows us to assume that a partially valid page found here
	 * is fully valid (vm_fault will zero out areas of the page not
	 * marked valid).
	 */
	m = pages[ap->a_reqpage];
	if (m->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return (0);
	}
	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	kva = msf_buf_kva(msf);
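	/*
	 * (The MSF_BUF mapped above provides a single contiguous kernel
	 * virtual address range for the whole page list, so the read RPC
	 * below can treat the pages as one flat buffer.)
	 */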
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;

	error = nfs_readrpc_uio(vp, &uio);

	if (error && ((int)uio.uio_resid == count)) {
		kprintf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
	size = count - (int)uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.
		 */
		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_set_valid(m, 0, size - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, size - toff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(m, TRUE); */
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (m->flags & PG_WANTED)
				vm_page_activate(m);
			else
				vm_page_deactivate(m);
			vnode_pager_freepage(m);
		}
	}
	return (0);
}
/*
 * Vnode op for VM putpages.
 *
 * The pmap modified bit was cleared prior to the putpages and probably
 * couldn't get set again until after our I/O completed, since the page
 * should not be mapped.  But don't count on it.  The m->dirty bits must
 * be completely cleared when we finish even if the count is truncated.
 *
 * nfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nfs_putpages(struct vop_putpages_args *ap)
{
	struct thread *td = curthread;
	int iomode, must_commit, i, error, npages, count;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);
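	/*
	 * (btoc() converts the byte count into a page count; offset is the
	 * file offset of the first page being put.)
	 */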
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
	}

	/*
	 * Use an MSF_BUF as a medium to retrieve data from the pages.
	 */
	msf_map_pagelist(&msf, pages, npages, 0);
	kva = msf_buf_kva(msf);

	uio.uio_offset = offset;
	uio.uio_resid = (size_t)count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc_uio(vp, &uio, &iomode, &must_commit);

	nwritten = round_page(count - (int)uio.uio_resid) / PAGE_SIZE;
	for (i = 0; i < nwritten; i++) {
		rtvals[i] = VM_PAGER_OK;
		vm_page_undirty(pages[i]);
	}
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	return rtvals[0];
}
/*
 * Vnode op for read using bio
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
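	/*
	 * (seqcount converts the sequential-access heuristic carried in the
	 * upper bits of ioflag, extracted via IO_SEQSHIFT, into an
	 * approximate number of read-ahead blocks, scaled by the mount's
	 * block size relative to BKVASIZE.)
	 */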
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * NFS:		If local changes have been made and this is a
	 *		directory, the directory must be invalidated and
	 *		the attribute cache must be cleared.
	 *
	 *		GETATTR is called to synchronize the file size.
	 *
	 *		If remote changes are detected local data is flushed
	 *		and the cache is invalidated.
	 *
	 *		NOTE: In the normal case the attribute cache is not
	 *		cleared which means GETATTR may use cached data and
	 *		not immediately detect changes made on the server.
	 */
	if ((np->n_flag & NLMODIFIED) && vp->v_type == VDIR) {
		nfs_invaldir(vp);
		error = nfs_vinvalbuf(vp, V_SAVE, 1);
		if (error)
			return (error);
	}
	error = VOP_GETATTR(vp, &vattr);
	if (error)
		return (error);
	if (np->n_flag & NRMODIFIED) {
		if (vp->v_type == VDIR)
			nfs_invaldir(vp);
		error = nfs_vinvalbuf(vp, V_SAVE, 1);
		if (error)
			return (error);
		np->n_flag &= ~NRMODIFIED;
	}

	/*
	 * Loop until uio exhausted or we hit EOF
	 */
	do {
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
			lbn = uio->uio_offset / biosize;
			boff = uio->uio_offset & (biosize - 1);
			loffset = (off_t)lbn * biosize;
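			/*
			 * (lbn is the logical block index, boff the byte
			 * offset within that block, and loffset the byte
			 * offset of the block itself.)
			 */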
			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0 && nfs_asyncok(nmp)) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < np->n_size;
				    nra++) {
					rabn = lbn + 1 + nra;
					raoffset = (off_t)rabn * biosize;
					if (findblk(vp, raoffset, FINDBLK_TEST) == NULL) {
						rabp = nfs_getcacheblk(vp, raoffset,
								       biosize, td);
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_cmd = BUF_CMD_READ;
							vfs_busy_pages(vp, rabp);
							nfs_asyncio(vp, &rabp->b_bio2);
						}
					}
				}
			}

			/*
			 * Obtain the buffer cache block.  Figure out the buffer size
			 * when we are at EOF.  If we are modifying the size of the
			 * buffer based on an EOF condition we need to hold
			 * nfs_rslock() through obtaining the buffer to prevent
			 * a potential writer-appender from messing with n_size.
			 * Otherwise we may accidentally truncate the buffer and
			 * lose dirty data.
			 *
			 * Note that bcount is *not* DEV_BSIZE aligned.
			 */
			if (loffset + boff >= np->n_size) {
				/* ... */
			}
			bp = nfs_getcacheblk(vp, loffset, biosize, td);

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * boff is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
			n = biosize - boff;
			if (n > uio->uio_resid)
				n = uio->uio_resid;
			if (loffset + boff + n > np->n_size)
				n = np->n_size - loffset - boff;
			break;
		case VLNK:
			biosize = min(NFS_MAXPATHLEN, np->n_size);
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (off_t)0, biosize, td);
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
				if (error) {
					bp->b_flags |= B_ERROR | B_INVAL;
					brelse(bp);
					return (error);
				}
			}
			n = szmin(uio->uio_resid,
				  (size_t)bp->b_bcount - bp->b_resid);
			boff = 0;
			break;
		case VDIR:
			nfsstats.biocache_readdirs++;
			if (np->n_direofoffset &&
			    uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			boff = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			loffset = uio->uio_offset - boff;
			bp = nfs_getcacheblk(vp, loffset, NFS_DIRBLKSIZ, td);
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_cmd = BUF_CMD_READ;
				bp->b_bio2.bio_done = nfsiodone_sync;
				bp->b_bio2.bio_flags |= BIO_SYNC;
				vfs_busy_pages(vp, bp);
				error = nfs_doio(vp, &bp->b_bio2, td);
			}
			while (error == NFSERR_BAD_COOKIE) {
				kprintf("got bad cookie vp %p bp %p\n", vp, bp);
				nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, 0, 1);
				/*
				 * Yuck! The directory has been modified on the
				 * server.  The only way to get the block is by
				 * reading from the beginning to get all the
				 * offset cookies.
				 *
				 * Leave the last bp intact unless there is an error.
				 * Loop back up to the while if the error is another
				 * NFSERR_BAD_COOKIE (double yuch!).
				 */
				for (i = 0; i <= lbn && !error; i++) {
					if (np->n_direofoffset &&
					    (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) {
						return (0);
					}
					bp = nfs_getcacheblk(vp, (off_t)i * NFS_DIRBLKSIZ,
							     NFS_DIRBLKSIZ, td);
					if ((bp->b_flags & B_CACHE) == 0) {
						bp->b_cmd = BUF_CMD_READ;
						bp->b_bio2.bio_done = nfsiodone_sync;
						bp->b_bio2.bio_flags |= BIO_SYNC;
						vfs_busy_pages(vp, bp);
						error = nfs_doio(vp, &bp->b_bio2, td);
						/*
						 * no error + B_INVAL == directory EOF,
						 * use the block.
						 */
						if (error == 0 && (bp->b_flags & B_INVAL))
							break;
					}
					/*
					 * An error will throw away the block and the
					 * for loop will break out.  If no error and this
					 * is not the block we want, we throw away the
					 * block and go for the next one via the for loop.
					 */
					if (error || i < lbn)
						brelse(bp);
				}
			}
			/*
			 * The above while is repeated if we hit another cookie
			 * error.  If we hit an error and it wasn't a cookie
			 * error, we give up.
			 */
			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 && nfs_asyncok(nmp) &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			     loffset + NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    findblk(vp, loffset + NFS_DIRBLKSIZ,
				    FINDBLK_TEST) == NULL) {
				rabp = nfs_getcacheblk(vp, loffset + NFS_DIRBLKSIZ,
						       NFS_DIRBLKSIZ, td);
				if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
					rabp->b_cmd = BUF_CMD_READ;
					vfs_busy_pages(vp, rabp);
					nfs_asyncio(vp, &rabp->b_bio2);
				}
			}

			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 *
			 * NOTE: boff could already be beyond EOF.
			 */
			if ((size_t)boff > NFS_DIRBLKSIZ - bp->b_resid) {
				n = 0;
			} else {
				n = szmin(uio->uio_resid,
					  NFS_DIRBLKSIZ - bp->b_resid - (size_t)boff);
			}
			if (np->n_direofoffset &&
			    n > (size_t)(np->n_direofoffset - uio->uio_offset)) {
				n = (size_t)(np->n_direofoffset - uio->uio_offset);
			}
			break;
		default:
			kprintf(" nfs_bioread: type %x unexpected\n", vp->v_type);
			n = 0;
			break;
		}
		switch (vp->v_type) {
		case VREG:
			error = uiomove(bp->b_data + boff, n, uio);
			break;
		case VLNK:
			error = uiomove(bp->b_data + boff, n, uio);
			break;
		case VDIR: {
			off_t old_off = uio->uio_offset;
			caddr_t cpos, epos;
			struct nfs_dirent *dp;

			/*
			 * We are casting cpos to nfs_dirent, it must be
			 * int-aligned.
			 */
			cpos = bp->b_data + boff;
			epos = bp->b_data + boff + n;
			while (cpos < epos && error == 0 && uio->uio_resid > 0) {
				dp = (struct nfs_dirent *)cpos;
				error = nfs_check_dirent(dp, (int)(epos - cpos));
				if (error)
					break;
				if (vop_write_dirent(&error, uio, dp->nfs_ino,
				    dp->nfs_type, dp->nfs_namlen, dp->nfs_name)) {
					break;
				}
				cpos += dp->nfs_reclen;
			}
			uio->uio_offset = old_off + cpos - bp->b_data - boff;
			break;
		    }
		default:
			kprintf(" nfs_bioread: type %x unexpected\n", vp->v_type);
			break;
		}
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
/*
 * Userland can supply any 'seek' offset when reading a NFS directory.
 * Validate the structure so we don't panic the kernel.  Note that
 * the element name is nul terminated and the nul is not included
 * in the length.
 */
static int
nfs_check_dirent(struct nfs_dirent *dp, int maxlen)
{
	int nfs_name_off = offsetof(struct nfs_dirent, nfs_name[0]);

	if (nfs_name_off >= maxlen)
		return (EINVAL);
	if (dp->nfs_reclen < nfs_name_off || dp->nfs_reclen > maxlen)
		return (EINVAL);
	if (nfs_name_off + dp->nfs_namlen >= dp->nfs_reclen)
		return (EINVAL);
	if (dp->nfs_reclen & 3)
		return (EINVAL);
	return (0);
}
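/*
 * (For reference, the checks above enforce this record layout, where
 * nfs_name_off is the byte offset of the name field within the record:
 *
 *	[0, nfs_name_off)		fixed header fields
 *	[nfs_name_off, +nfs_namlen]	name bytes plus the terminating nul
 *	nfs_reclen			total record length, 4-byte aligned,
 *					bounded by the remaining buffer)
 */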
/*
 * Vnode op for write using bio
 *
 * nfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
nfs_write(struct vop_write_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int ioflag = ap->a_ioflag;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("nfs_write proc");
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, td);
	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NLMODIFIED) {
			error = nfs_flush(vp, MNT_WAIT, td, 0);
			/* error = nfs_vinvalbuf(vp, V_SAVE, 1); */
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch (nfs_rslock(np)) {
		/* ... */
		}
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (td->td_proc && uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		/* ... */
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;
	do {
		nfsstats.biocache_writes++;
		boff = uio->uio_offset & (biosize-1);
		loffset = uio->uio_offset - boff;
		bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
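		/*
		 * (boff is the offset of this write within its buffer,
		 * loffset the buffer's file offset, and bytes how much of
		 * the remaining uio fits in the current buffer.)
		 */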
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.  When extending B_CACHE will be
		 * set if possible.  See UIO_NOCOPY note below.
		 */
		if (uio->uio_offset + bytes > np->n_size) {
			np->n_flag |= NLMODIFIED;
			bp = nfs_meta_setsize(vp, td, loffset, boff, bytes);
		} else {
			bp = nfs_getcacheblk(vp, loffset, biosize, td);
		}

		/*
		 * Actual bytes in buffer which we care about
		 */
		if (loffset + biosize < np->n_size)
			bcount = biosize;
		else
			bcount = (int)(np->n_size - loffset);

		/*
		 * Avoid a read by setting B_CACHE where the data we
		 * intend to write covers the entire buffer.  Note
		 * that the buffer may have been set to B_CACHE by
		 * nfs_meta_setsize() above or otherwise inherited the
		 * flag, but if B_CACHE isn't set the buffer may be
		 * uninitialized and must be zeroed to accommodate
		 * future seek+write's.
		 *
		 * See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * When doing a UIO_NOCOPY write the buffer is not
		 * overwritten and we cannot just set B_CACHE unconditionally
		 * for full-block writes.
		 */
		if (boff == 0 && bytes == biosize &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}

		/*
		 * b_resid may be set due to file EOF if we extended out.
		 * The NFS bio code will zero the difference anyway so
		 * just acknowledge the fact and set b_resid to 0.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_cmd = BUF_CMD_READ;
			bp->b_bio2.bio_done = nfsiodone_sync;
			bp->b_bio2.bio_flags |= BIO_SYNC;
			vfs_busy_pages(vp, bp);
			error = nfs_doio(vp, &bp->b_bio2, td);
			if (error) {
				brelse(bp);
				break;
			}
			bp->b_resid = 0;
		}

		np->n_flag |= NLMODIFIED;
		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			kprintf("NFS append race @%08llx:%d\n",
				(long long)bp->b_bio2.bio_offset,
				bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (boff > bp->b_dirtyend ||
		     (boff + bytes) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
		}

		error = uiomove(bp->b_data + boff, bytes, uio);
		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 *
		 * The underlying VM pages have been marked valid by
		 * virtue of acquiring the bp.  Because the entire buffer
		 * is marked dirty we do not have to worry about cleaning
		 * out the related dirty bits (and wouldn't really know
		 * how to deal with byte ranges anyway).
		 */
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
			bp->b_dirtyend = imax(boff + bytes, bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = boff;
			bp->b_dirtyend = boff + bytes;
		}

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 *
		 * If nfs_async is set bawrite() will use an unstable write
		 * (build dirty bufs on the server), so we might as well
		 * push it out with bawrite().  If nfs_async is not set we
		 * use bdwrite() to cache dirty bufs on the client.
		 */
		if (ioflag & IO_SYNC) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else if (boff + bytes == biosize && nfs_async) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && bytes > 0);

	/* ... */
	return (error);
}
/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_startio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, off_t loffset, int size, struct thread *td)
{
	struct buf *bp;
	struct nfsmount *nmp;

	nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, loffset, size, GETBLK_PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, td))
				return (NULL);
			bp = getblk(vp, loffset, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, loffset, size, 0, 0);
	}

	/*
	 * bio2, the 'device' layer.  Since BIOs use 64 bit byte offsets
	 * now, no translation is necessary.
	 */
	bp->b_bio2.bio_offset = loffset;
	return (bp);
}
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	thread_t td = curthread;

	if (vp->v_flag & VRECLAIMED)
		return (0);

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "nfsvinval", slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, td))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, td)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	np->n_flag &= ~(NLMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}
/*
 * Return true (non-zero) if the txthread and rxthread are operational
 * and we do not already have too many not-yet-started BIO's built up.
 */
int
nfs_asyncok(struct nfsmount *nmp)
{
	return (nmp->nm_bioqlen < nfs_maxasyncbio &&
		nmp->nm_bioqlen < nmp->nm_maxasync_scaled / NFS_ASYSCALE &&
		nmp->nm_rxstate <= NFSSVC_PENDING &&
		nmp->nm_txstate <= NFSSVC_PENDING);
}
/*
 * The read-ahead code calls this to queue a bio to the txthread.
 *
 * We don't touch the bio otherwise... that is, we do not even
 * construct or send the initial rpc.  The txthread will do it
 * for us.
 *
 * NOTE! nm_bioqlen is not decremented until the request completes,
 *	 so it does not reflect the number of bio's on bioq.
 */
void
nfs_asyncio(struct vnode *vp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	KKASSERT(vp->v_tag == VT_NFS);
	bio->bio_driver_info = vp;
	TAILQ_INSERT_TAIL(&nmp->nm_bioq, bio, bio_act);
	atomic_add_int(&nmp->nm_bioqlen, 1);
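	/*
	 * (The wakeup below nudges the txthread, which will construct and
	 * transmit the actual RPC for the queued bio.)
	 */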
	nfssvc_iod_writer_wakeup(nmp);
}
/*
 * nfs_doio()	- Execute a BIO operation synchronously.  The BIO will be
 *		  completed and its error returned.  The caller is responsible
 *		  for brelse()ing it.  ONLY USE FOR BIO_SYNC IOs!  Otherwise
 *		  our error probe will be against an invalid pointer.
 *
 * nfs_startio()- Execute a BIO operation asynchronously.
 *
 * NOTE: nfs_asyncio() is used to initiate an asynchronous BIO operation,
 *	 which basically just queues it to the txthread.  nfs_startio()
 *	 actually initiates the I/O AFTER it has gotten to the txthread.
 *
 * NOTE: td might be NULL.
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_startio(struct vnode *vp, struct bio *bio, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct nfsmount *nmp;

	KKASSERT(vp->v_tag == VT_NFS);
	nmp = VFSTONFS(vp->v_mount);

	/*
	 * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~(B_ERROR | B_INVAL);

	KASSERT(bp->b_cmd != BUF_CMD_DONE,
		("nfs_doio: bp %p already marked done!", bp));
	if (bp->b_cmd == BUF_CMD_READ) {
		switch (vp->v_type) {
		case VREG:
			nfsstats.read_bios++;
			nfs_readrpc_bio(vp, bio);
			break;
		case VLNK:
#if 0
			bio->bio_offset = 0;
			nfsstats.readlink_bios++;
			nfs_readlinkrpc_bio(vp, bio);
#else
			nfs_doio(vp, bio, td);
#endif
			break;
		case VDIR:
			/*
			 * NOTE: If nfs_readdirplusrpc_bio() is requested but
			 *	 not supported, it will chain to
			 *	 nfs_readdirrpc_bio().
			 */
#if 0
			nfsstats.readdir_bios++;
			uiop->uio_offset = bio->bio_offset;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS)
				nfs_readdirplusrpc_bio(vp, bio);
			else
				nfs_readdirrpc_bio(vp, bio);
#else
			nfs_doio(vp, bio, td);
#endif
			break;
		default:
			kprintf("nfs_doio: type %x unexpected\n", vp->v_type);
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
			break;
		}
	} else {
		/*
		 * If we only need to commit, try to commit.  If this fails
		 * it will chain through to the write.  Basically all the logic
		 * in nfs_doio() is replicated.
		 */
		KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
		if (bp->b_flags & B_NEEDCOMMIT)
			nfs_commitrpc_bio(vp, bio);
		else
			nfs_writerpc_bio(vp, bio);
	}
}
int
nfs_doio(struct vnode *vp, struct bio *bio, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct nfsmount *nmp;
	int iomode, must_commit;

	KKASSERT(vp->v_tag == VT_NFS);
	nmp = VFSTONFS(vp->v_mount);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;

	/*
	 * clear B_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~(B_ERROR | B_INVAL);

	KASSERT(bp->b_cmd != BUF_CMD_DONE,
		("nfs_doio: bp %p already marked done!", bp));

	if (bp->b_cmd == BUF_CMD_READ) {
		io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			/*
			 * When reading from a regular file zero-fill any residual.
			 * Note that this residual has nothing to do with NFS short
			 * reads, which nfs_readrpc_uio() will handle for us.
			 *
			 * We have to do this because when we are write extending
			 * a file the server may not have the same notion of
			 * filesize as we do.  Our BIOs should already be sized
			 * (b_bcount) to account for the file EOF.
			 */
			nfsstats.read_bios++;
			uiop->uio_offset = bio->bio_offset;
			error = nfs_readrpc_uio(vp, uiop);
			if (error == 0 && uiop->uio_resid) {
				n = (size_t)bp->b_bcount - uiop->uio_resid;
				bzero(bp->b_data + n, bp->b_bcount - n);
				uiop->uio_resid = 0;
			}
			if (td && td->td_proc && (vp->v_flag & VTEXT) &&
			    np->n_mtime != np->n_vattr.va_mtime.tv_sec) {
				uprintf("Process killed due to text file modification\n");
				ksignal(td->td_proc, SIGKILL);
			}
			break;
		case VLNK:
			uiop->uio_offset = 0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc_uio(vp, uiop);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = bio->bio_offset;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc_uio(vp, uiop);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc_uio(vp, uiop);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * EOF error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			kprintf("nfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
		bp->b_resid = uiop->uio_resid;
	} else {
		/*
		 * If we only need to commit, try to commit.
		 *
		 * NOTE: The I/O has already been staged for the write and
		 *	 its pages busied, so b_dirtyoff/end is valid.
		 */
		KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
		if (bp->b_flags & B_NEEDCOMMIT) {
			off = bio->bio_offset + bp->b_dirtyoff;
			retv = nfs_commitrpc_uio(vp, off,
						 bp->b_dirtyend - bp->b_dirtyoff,
						 td);
			if (retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				nfs_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		if (bio->bio_offset + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - bio->bio_offset;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
				- bp->b_dirtyoff;
			uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_bios++;

			if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
				iomode = NFSV3WRITE_UNSTABLE;
			else
				iomode = NFSV3WRITE_FILESYNC;

			error = nfs_writerpc_uio(vp, uiop, &iomode, &must_commit);
			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */
			if (!error && iomode == NFSV3WRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0 &&
				    bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set B_ERROR and report the interruption
			 * by setting B_EINTR.  For the async case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe.
			 */
			if (error == EINTR ||
			    (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0)
					bdirty(bp);
				bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_flags |= B_ERROR;
					bp->b_error = np->n_error = error;
					np->n_flag |= NWRITEERR;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
			if (must_commit)
				nfs_clearcommit(vp->v_mount);
			bp->b_resid = uiop->uio_resid;
		}
	}

	/*
	 * I/O was run synchronously, biodone() it and calculate the
	 * error to return.
	 */
	biodone(bio);
	KKASSERT(bp->b_cmd == BUF_CMD_DONE);
	if (bp->b_flags & B_EINTR)
		return (EINTR);
	if (bp->b_flags & B_ERROR)
		return (bp->b_error ? bp->b_error : EIO);
	return (0);
}
/*
 * Used to aid in handling ftruncate() and non-trivial write-extend
 * operations on the NFS client side.  Note that trivial write-extend
 * operations (appending with no write hole) are handled by nfs_write()
 * directly to avoid silly flushes.
 *
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 *
 * File extension no longer has an issue now that the buffer size is
 * fixed.  When extending the intended overwrite area is specified
 * by (boff, bytes).  This function uses the parameters to determine
 * what areas must be zeroed.  If there are no gaps we set B_CACHE.
 */
static struct buf *
nfs_meta_setsize(struct vnode *vp, struct thread *td, off_t nbase,
		 int boff, int bytes)
{
	struct nfsnode *np = VTONFS(vp);
	off_t osize = np->n_size;
	int biosize = vp->v_mount->mnt_stat.f_iosize;

	nsize = nbase + boff + bytes;

	if (nsize < osize) {
		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point, but it will invalidate pages in
		 * that buffer and zero the appropriate byte range in
		 * the page straddling EOF.
		 */
		error = vtruncbuf(vp, nsize, biosize);

		/*
		 * NFS doesn't do a good job tracking changes in the EOF
		 * so it may not revisit the buffer if the file is extended.
		 *
		 * After truncating just clear B_CACHE on the buffer
		 * straddling EOF.  If the buffer is dirty then clean
		 * out the portion beyond the file EOF.
		 */
		bp = nfs_getcacheblk(vp, nbase, biosize, td);
		if (bp->b_flags & B_DELWRI) {
			if (bp->b_dirtyoff > bp->b_bcount)
				bp->b_dirtyoff = bp->b_bcount;
			if (bp->b_dirtyend > bp->b_bcount)
				bp->b_dirtyend = bp->b_bcount;
			boff = (int)nsize & (biosize - 1);
			bzero(bp->b_data + boff, biosize - boff);
		} else if (nsize != nbase) {
			boff = (int)nsize & (biosize - 1);
			bzero(bp->b_data + boff, biosize - boff);
		}
	} else {
		/*
		 * The newly expanded portions of the buffer should already
		 * be zeroed out if B_CACHE is set.  If B_CACHE is not
		 * set and the buffer is beyond osize we can safely zero it
		 * and set B_CACHE to avoid issuing unnecessary degenerate
		 * read rpcs.
		 *
		 * Don't do this if the caller is going to overwrite the
		 * entire buffer anyway (and also don't set B_CACHE!).
		 * This allows the caller to optimize the operation.
		 */
		KKASSERT(nsize >= 0);
		vnode_pager_setsize(vp, (vm_ooffset_t)nsize);

		bp = nfs_getcacheblk(vp, nbase, biosize, td);
		if ((bp->b_flags & B_CACHE) == 0 && nbase >= osize &&
		    !(boff == 0 && bytes == biosize)) {
			bzero(bp->b_data, biosize);
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}
	}
	return (bp);
}
/*
 * Synchronous completion for nfs_doio.  Call bpdone() with elseit=FALSE.
 * Caller is responsible for brelse()'ing the bp.
 */
static void
nfsiodone_sync(struct bio *bio)
{
	bpdone(bio->bio_buf, 0);
}
/*
 * nfs read rpc - BIO version
 */
void
nfs_readrpc_bio(struct vnode *vp, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct nfsmount *nmp;
	int error = 0, len, tsiz;
	struct nfsm_info *info;

	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
	info->v3 = NFS_ISV3(vp);

	nmp = VFSTONFS(vp->v_mount);
	tsiz = bp->b_bcount;
	KKASSERT(tsiz <= nmp->nm_rsize);
	if (bio->bio_offset + tsiz > nmp->nm_maxfilesize) {
		error = EFBIG;
		goto nfsmout;
	}

	nfsstats.rpccnt[NFSPROC_READ]++;
	len = tsiz;
	nfsm_reqhead(info, vp, NFSPROC_READ,
		     NFSX_FH(info->v3) + NFSX_UNSIGNED * 3);
	ERROROUT(nfsm_fhtom(info, vp));
	tl = nfsm_build(info, NFSX_UNSIGNED * 3);
	if (info->v3) {
		txdr_hyper(bio->bio_offset, tl);
		*(tl + 2) = txdr_unsigned(len);
	} else {
		*tl++ = txdr_unsigned(bio->bio_offset);
		*tl++ = txdr_unsigned(len);
		*tl = 0;
	}
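	/*
	 * (NFSv3 encodes a 64-bit offset via txdr_hyper() plus a 32-bit
	 * count; NFSv2 only carries 32-bit fields, which is why the two
	 * encodings above differ.)
	 */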
	info->done = nfs_readrpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_READ, NULL,
			 nfs_vpcred(vp, ND_READ));
	return;
nfsmout:
	kfree(info, M_NFSREQ);
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	biodone(bio);
}
static void
nfs_readrpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;

	KKASSERT(info->state == NFSM_STATE_DONE);

	if (info->v3) {
		ERROROUT(nfsm_postop_attr(info, info->vp, &attrflag,
					  NFS_LATTR_NOSHRINK));
		NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED));
		eof = fxdr_unsigned(int, *(tl + 1));
	} else {
		ERROROUT(nfsm_loadattr(info, info->vp, NULL));
	}
	NEGATIVEOUT(retlen = nfsm_strsiz(info, nmp->nm_rsize));
	ERROROUT(nfsm_mtobio(info, bio, retlen));
	m_freem(info->mrep);
	info->mrep = NULL;

	/*
	 * No error occurred, if retlen is less than bcount and no EOF
	 * and NFSv3 a zero-fill short read occurred.
	 *
	 * For NFSv2 a short-read indicates EOF.
	 */
	if (retlen < bp->b_bcount && info->v3 && eof == 0) {
		bzero(bp->b_data + retlen, bp->b_bcount - retlen);
		retlen = bp->b_bcount;
	}

	/*
	 * If we hit an EOF we still zero-fill, but return the expected
	 * b_resid anyway.  This should normally not occur since async
	 * BIOs are not used for read-before-write case.  Races against
	 * the server can cause it though and we don't want to leave
	 * garbage in the buffer.
	 */
	if (retlen < bp->b_bcount) {
		bzero(bp->b_data + retlen, bp->b_bcount - retlen);
	}
	bp->b_resid = 0;
	/* bp->b_resid = bp->b_bcount - retlen; */
	biodone(bio);
	kfree(info, M_NFSREQ);
	return;
nfsmout:
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	biodone(bio);
}
/*
 * nfs write call - BIO version
 *
 * NOTE: Caller has already busied the I/O.
 */
void
nfs_writerpc_bio(struct vnode *vp, struct bio *bio)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = bio->bio_buf;
	struct nfsm_info *info;

	/*
	 * Setup for actual write.  Just clean up the bio if there
	 * is nothing to do.  b_dirtyoff/end have already been staged
	 * by the bp's pages getting busied.
	 */
	if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	if (bp->b_dirtyend <= bp->b_dirtyoff) {
		bp->b_resid = 0;
		biodone(bio);
		return;
	}
	len = bp->b_dirtyend - bp->b_dirtyoff;
	offset = bio->bio_offset + bp->b_dirtyoff;
	if (offset + len > nmp->nm_maxfilesize) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EFBIG;
		biodone(bio);
		return;
	}

	nfsstats.write_bios++;
	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);
	info->v3 = NFS_ISV3(vp);
	info->info_writerpc.must_commit = 0;
	if ((bp->b_flags & (B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == 0)
		iomode = NFSV3WRITE_UNSTABLE;
	else
		iomode = NFSV3WRITE_FILESYNC;

	KKASSERT(len <= nmp->nm_wsize);

	nfsstats.rpccnt[NFSPROC_WRITE]++;
	nfsm_reqhead(info, vp, NFSPROC_WRITE,
		     NFSX_FH(info->v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
	ERROROUT(nfsm_fhtom(info, vp));
	if (info->v3) {
		tl = nfsm_build(info, 5 * NFSX_UNSIGNED);
		txdr_hyper(offset, tl);
		tl += 2;
		*tl++ = txdr_unsigned(len);
		*tl++ = txdr_unsigned(iomode);
		*tl = txdr_unsigned(len);
	} else {
		tl = nfsm_build(info, 4 * NFSX_UNSIGNED);
		/* Set both "begin" and "current" to non-garbage. */
		x = txdr_unsigned((u_int32_t)offset);
		*tl++ = x;	/* "begin offset" */
		*tl++ = x;	/* "current offset" */
		x = txdr_unsigned(len);
		*tl++ = x;	/* total to this offset */
		*tl = x;	/* size of this write */
	}
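	/*
	 * (The v2 header above carries separate "begin" and "current"
	 * offsets plus two length fields; both offsets and both lengths
	 * are deliberately set to the same sane values here.)
	 */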
	ERROROUT(nfsm_biotom(info, bio, bp->b_dirtyoff, len));
	info->done = nfs_writerpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_WRITE, NULL,
			 nfs_vpcred(vp, ND_WRITE));
	return;
nfsmout:
	kfree(info, M_NFSREQ);
	bp->b_error = error;
	bp->b_flags |= B_ERROR;
	biodone(bio);
}
static void
nfs_writerpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct nfsnode *np = VTONFS(info->vp);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;
	int wccflag = NFSV3_WCCRATTR;
	int iomode = NFSV3WRITE_FILESYNC;
	int len = bp->b_resid;	/* b_resid was set to shortened length */

	if (info->v3) {
		/*
		 * The write RPC returns a before and after mtime.  The
		 * nfsm_wcc_data() macro checks the before n_mtime
		 * against the before time and stores the after time
		 * in the nfsnode's cached vattr and n_mtime field.
		 * The NRMODIFIED bit will be set if the before
		 * time did not match the original mtime.
		 */
		wccflag = NFSV3_WCCCHK;
		ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
		NULLOUT(tl = nfsm_dissect(info, 2 * NFSX_UNSIGNED +
					  NFSX_V3WRITEVERF));
		rlen = fxdr_unsigned(int, *tl++);
		if (rlen == 0) {
			error = NFSERR_IO;
			m_freem(info->mrep);
			info->mrep = NULL;
			goto nfsmout;
		} else if (rlen < len) {
			/*
			 * XXX what do we do here?
			 */
			backup = len - rlen;
			uiop->uio_iov->iov_base =
			    (char *)uiop->uio_iov->iov_base - backup;
			uiop->uio_iov->iov_len += backup;
			uiop->uio_offset -= backup;
			uiop->uio_resid += backup;
		}
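		/*
		 * (A short write: the uio is backed up by the number of
		 * bytes the server did not take so a subsequent pass can
		 * resend them.)
		 */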
		commit = fxdr_unsigned(int, *tl++);

		/*
		 * Return the lowest commitment level
		 * obtained by any of the RPCs.
		 */
		if (iomode == NFSV3WRITE_FILESYNC)
			iomode = commit;
		else if (iomode == NFSV3WRITE_DATASYNC &&
			 commit == NFSV3WRITE_UNSTABLE)
			iomode = commit;
		if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
			bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
			nmp->nm_state |= NFSSTA_HASWRITEVERF;
		} else if (bcmp(tl, nmp->nm_verf, NFSX_V3WRITEVERF)) {
			info->info_writerpc.must_commit = 1;
			bcopy(tl, (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF);
		}
	} else {
		ERROROUT(nfsm_loadattr(info, info->vp, NULL));
	}
	m_freem(info->mrep);
	info->mrep = NULL;

	if (info->vp->v_mount->mnt_flag & MNT_ASYNC)
		iomode = NFSV3WRITE_FILESYNC;

	/*
	 * End of RPC.  Now clean up the bp.
	 *
	 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
	 * to cluster the buffers needing commit.  This will allow
	 * the system to submit a single commit rpc for the whole
	 * cluster.  We can do this even if the buffer is not 100%
	 * dirty (relative to the NFS blocksize), so we optimize the
	 * append-to-file-case.
	 *
	 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
	 * cleared because write clustering only works for commit
	 * rpc's, not for the data portion of the write).
	 */
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		bp->b_flags |= B_NEEDCOMMIT;
		if (bp->b_dirtyoff == 0 && bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
	} else {
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
	}
	/*
	 * For an interrupted write, the buffer is still valid
	 * and the write hasn't been pushed to the server yet,
	 * so we can't set B_ERROR and report the interruption
	 * by setting B_EINTR.  For the async case, B_EINTR
	 * is not relevant, so the rpc attempt is essentially
	 * a noop.  For the case of a V3 write rpc not being
	 * committed to stable storage, the block is still
	 * dirty and requires either a commit rpc or another
	 * write rpc with iomode == NFSV3WRITE_FILESYNC before
	 * the block is reused.  This is indicated by setting
	 * the B_DELWRI and B_NEEDCOMMIT flags.
	 *
	 * If the buffer is marked B_PAGING, it does not reside on
	 * the vp's paging queues so we cannot call bdirty().  The
	 * bp in this case is not an NFS cache block so we should
	 * be safe.
	 */
	if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
		bp->b_flags &= ~(B_INVAL|B_NOCACHE);
		if ((bp->b_flags & B_PAGING) == 0)
			bdirty(bp);
		bp->b_flags |= B_EINTR;
	} else {
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	}
nfsmout:
	if (info->info_writerpc.must_commit)
		nfs_clearcommit(info->vp->v_mount);
	kfree(info, M_NFSREQ);
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	biodone(bio);
}
/*
 * Nfs Version 3 commit rpc - BIO version
 *
 * This function issues the commit rpc and will chain to a write
 * rpc if necessary.
 */
void
nfs_commitrpc_bio(struct vnode *vp, struct bio *bio)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct buf *bp = bio->bio_buf;
	struct nfsm_info *info;

	if ((nmp->nm_state & NFSSTA_HASWRITEVERF) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	info = kmalloc(sizeof(*info), M_NFSREQ, M_WAITOK);

	nfsstats.rpccnt[NFSPROC_COMMIT]++;
	nfsm_reqhead(info, vp, NFSPROC_COMMIT, NFSX_FH(1));
	ERROROUT(nfsm_fhtom(info, vp));
	tl = nfsm_build(info, 3 * NFSX_UNSIGNED);
	txdr_hyper(bio->bio_offset + bp->b_dirtyoff, tl);
	tl += 2;
	*tl = txdr_unsigned(bp->b_dirtyend - bp->b_dirtyoff);
	info->done = nfs_commitrpc_bio_done;
	nfsm_request_bio(info, vp, NFSPROC_COMMIT, NULL,
			 nfs_vpcred(vp, ND_WRITE));
	return;
nfsmout:
	/*
	 * Chain to write RPC on (early) error
	 */
	kfree(info, M_NFSREQ);
	nfs_writerpc_bio(vp, bio);
}
static void
nfs_commitrpc_bio_done(nfsm_info_t info)
{
	struct nfsmount *nmp = VFSTONFS(info->vp->v_mount);
	struct bio *bio = info->bio;
	struct buf *bp = bio->bio_buf;
	int wccflag = NFSV3_WCCRATTR;

	ERROROUT(nfsm_wcc_data(info, info->vp, &wccflag));
	if (error == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_V3WRITEVERF));
		if (bcmp(nmp->nm_verf, tl, NFSX_V3WRITEVERF)) {
			bcopy(tl, nmp->nm_verf, NFSX_V3WRITEVERF);
			error = NFSERR_STALEWRITEVERF;
		}
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	/*
	 * On completion we must chain to a write bio if an
	 * error occurred.
	 */
nfsmout:
	if (error == 0) {
		kfree(info, M_NFSREQ);
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		bp->b_resid = 0;
		biodone(bio);
	} else {
		nfs_writerpc_bio(info->vp, bio);
	}
}